repo_name: stringclasses (6 values)
pr_number: int64 (99 to 20.3k)
pr_title: stringlengths (8 to 158)
pr_description: stringlengths (0 to 6.54k)
author: stringlengths (4 to 18)
date_created: unknown
date_merged: unknown
previous_commit: stringlengths (40 to 40)
pr_commit: stringlengths (40 to 40)
query: stringlengths (37 to 6.57k)
filepath: stringlengths (8 to 153)
before_content: stringlengths (0 to 876M)
after_content: stringlengths (0 to 876M)
label: int64 (-1 to 1)
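For orientation, here is a minimal sketch of how a dataset with this schema could be loaded and filtered with the `datasets` library. The repository id is a placeholder, and it assumes `label == 1` marks the file actually modified by the PR (the rows below use `-1` for unmodified files).

```python
from datasets import load_dataset

# Hypothetical dataset id -- substitute the actual repository id of this dataset.
ds = load_dataset("org/pr-code-retrieval", split="train")

# Keep only rows whose file was (presumably) changed by the PR (label == 1),
# dropping negatives / unchanged files (label == -1).
positives = ds.filter(lambda row: row["label"] == 1)

print(positives[0]["pr_title"], positives[0]["filepath"])
```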
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert BERT checkpoint.""" import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path): # Initialise PyTorch model config = BertConfig.from_json_file(bert_config_file) print(f"Building PyTorch model from configuration: {config}") model = BertForPreTraining(config) # Load weights from tf checkpoint load_tf_weights_in_bert(model, config, tf_checkpoint_path) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert BERT checkpoint.""" import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path): # Initialise PyTorch model config = BertConfig.from_json_file(bert_config_file) print(f"Building PyTorch model from configuration: {config}") model = BertForPreTraining(config) # Load weights from tf checkpoint load_tf_weights_in_bert(model, config, tf_checkpoint_path) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
-1
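The row above pairs this PR with an unrelated BERT conversion script (label -1). As a hedged sketch, the conversion helper it defines could also be called directly from Python rather than via the CLI; the checkpoint paths below are placeholders, and TensorFlow must be installed for `load_tf_weights_in_bert` to work.

```python
from transformers.models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

# Placeholder paths -- adjust to your local TensorFlow checkpoint and config.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="bert_model.ckpt",   # TensorFlow checkpoint prefix
    bert_config_file="bert_config.json",    # JSON file describing the architecture
    pytorch_dump_path="pytorch_model.bin",  # where the PyTorch weights are written
)
```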
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./.git/objects/ca/ece4f6fdbb73a0c60074df3434b6bd08834251
(before_content: raw git object blob for ./.git/objects/ca/ece4f6fdbb73a0c60074df3434b6bd08834251; the compressed binary data is not human-readable text and is omitted here)
(after_content: identical raw git object blob; omitted)
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/models/segformer/test_feature_extraction_segformer.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import SegformerFeatureExtractor class SegformerFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False, ): size = size if size is not None else {"height": 30, "width": 30} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_reduce_labels = do_reduce_labels def prepare_feat_extract_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def prepare_semantic_single_inputs(): dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image = Image.open(dataset[0]["file"]) map = Image.open(dataset[1]["file"]) return image, map def prepare_semantic_batch_inputs(): dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image1 = Image.open(dataset[0]["file"]) map1 = Image.open(dataset[1]["file"]) image2 = Image.open(dataset[2]["file"]) map2 = Image.open(dataset[3]["file"]) return [image1, image2], [map1, map2] @require_torch @require_vision class SegformerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = SegformerFeatureExtractor if is_vision_available() else None def setUp(self): self.feature_extract_tester = SegformerFeatureExtractionTester(self) @property def feat_extract_dict(self): return self.feature_extract_tester.prepare_feat_extract_dict() def test_feat_extract_properties(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(feature_extractor, "do_resize")) self.assertTrue(hasattr(feature_extractor, "size")) self.assertTrue(hasattr(feature_extractor, "do_normalize")) self.assertTrue(hasattr(feature_extractor, "image_mean")) self.assertTrue(hasattr(feature_extractor, "image_std")) self.assertTrue(hasattr(feature_extractor, "do_reduce_labels")) def test_batch_feature(self): pass def test_call_pil(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random PIL images image_inputs = 
prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) def test_call_numpy(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) def test_call_pytorch(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) def test_call_segmentation_maps(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) maps = [] for image in image_inputs: self.assertIsInstance(image, torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input encoding = feature_extractor(image_inputs[0], maps[0], return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) 
self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched encoding = feature_extractor(image_inputs, maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test not batched input (PIL images) image, segmentation_map = prepare_semantic_single_inputs() encoding = feature_extractor(image, segmentation_map, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched input (PIL images) images, segmentation_maps = prepare_semantic_batch_inputs() encoding = feature_extractor(images, segmentation_maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 2, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 2, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) def test_reduce_labels(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 image, map = prepare_semantic_single_inputs() encoding = feature_extractor(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 150) feature_extractor.reduce_labels = True encoding = feature_extractor(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255)
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import SegformerFeatureExtractor class SegformerFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False, ): size = size if size is not None else {"height": 30, "width": 30} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_reduce_labels = do_reduce_labels def prepare_feat_extract_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def prepare_semantic_single_inputs(): dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image = Image.open(dataset[0]["file"]) map = Image.open(dataset[1]["file"]) return image, map def prepare_semantic_batch_inputs(): dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image1 = Image.open(dataset[0]["file"]) map1 = Image.open(dataset[1]["file"]) image2 = Image.open(dataset[2]["file"]) map2 = Image.open(dataset[3]["file"]) return [image1, image2], [map1, map2] @require_torch @require_vision class SegformerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = SegformerFeatureExtractor if is_vision_available() else None def setUp(self): self.feature_extract_tester = SegformerFeatureExtractionTester(self) @property def feat_extract_dict(self): return self.feature_extract_tester.prepare_feat_extract_dict() def test_feat_extract_properties(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(feature_extractor, "do_resize")) self.assertTrue(hasattr(feature_extractor, "size")) self.assertTrue(hasattr(feature_extractor, "do_normalize")) self.assertTrue(hasattr(feature_extractor, "image_mean")) self.assertTrue(hasattr(feature_extractor, "image_std")) self.assertTrue(hasattr(feature_extractor, "do_reduce_labels")) def test_batch_feature(self): pass def test_call_pil(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random PIL images image_inputs = 
prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) def test_call_numpy(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) def test_call_pytorch(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) def test_call_segmentation_maps(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) maps = [] for image in image_inputs: self.assertIsInstance(image, torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input encoding = feature_extractor(image_inputs[0], maps[0], return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) 
self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched encoding = feature_extractor(image_inputs, maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test not batched input (PIL images) image, segmentation_map = prepare_semantic_single_inputs() encoding = feature_extractor(image, segmentation_map, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched input (PIL images) images, segmentation_maps = prepare_semantic_batch_inputs() encoding = feature_extractor(images, segmentation_maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 2, self.feature_extract_tester.num_channels, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 2, self.feature_extract_tester.size["height"], self.feature_extract_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) def test_reduce_labels(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 image, map = prepare_semantic_single_inputs() encoding = feature_extractor(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 150) feature_extractor.reduce_labels = True encoding = feature_extractor(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255)
-1
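For context, a minimal sketch of the API the tests above exercise: building a `SegformerFeatureExtractor` and encoding an image together with a segmentation map, as in `test_call_segmentation_maps`. The image paths are placeholders.

```python
from PIL import Image
from transformers import SegformerFeatureExtractor

# Same target size as in the test configuration above.
feature_extractor = SegformerFeatureExtractor(size={"height": 30, "width": 30})

image = Image.open("scene.jpg")                  # placeholder input image path
segmentation_map = Image.open("scene_seg.png")   # placeholder per-pixel label map path

encoding = feature_extractor(image, segmentation_map, return_tensors="pt")
print(encoding["pixel_values"].shape, encoding["labels"].shape)
```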
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./docs/source/en/add_new_model.mdx
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the --> # How to add a model to 🤗 Transformers? The 🤗 Transformers library is often able to offer new models thanks to community contributors. But this can be a challenging project and requires an in-depth knowledge of the 🤗 Transformers library and the model to implement. At Hugging Face, we're trying to empower more of the community to actively add models and we've put together this guide to walk you through the process of adding a PyTorch model (make sure you have [PyTorch installed](https://pytorch.org/get-started/locally/)). <Tip> If you're interested in implementing a TensorFlow model, take a look at the [How to convert a 🤗 Transformers model to TensorFlow](add_tensorflow_model) guide! </Tip> Along the way, you'll: - get insights into open-source best practices - understand the design principles behind one of the most popular deep learning libraries - learn how to efficiently test large models - learn how to integrate Python utilities like `black`, `isort`, and `make fix-copies` to ensure clean and readable code A Hugging Face team member will be available to help you along the way so you'll never be alone. 🤗 ❤️ To get started, open a [New model addition](https://github.com/huggingface/transformers/issues/new?assignees=&labels=New+model&template=new-model-addition.yml) issue for the model you want to see in 🤗 Transformers. If you're not especially picky about contributing a specific model, you can filter by the [New model label](https://github.com/huggingface/transformers/labels/New%20model) to see if there are any unclaimed model requests and work on it. Once you've opened a new model request, the first step is to get familiar with 🤗 Transformers if you aren't already! ## General overview of 🤗 Transformers First, you should get a general overview of 🤗 Transformers. 🤗 Transformers is a very opinionated library, so there is a chance that you don't agree with some of the library's philosophies or design choices. From our experience, however, we found that the fundamental design choices and philosophies of the library are crucial to efficiently scale 🤗 Transformers while keeping maintenance costs at a reasonable level. A good first starting point to better understand the library is to read the [documentation of our philosophy](philosophy). As a result of our way of working, there are some choices that we try to apply to all models: - Composition is generally favored over-abstraction - Duplicating code is not always bad if it strongly improves the readability or accessibility of a model - Model files are as self-contained as possible so that when you read the code of a specific model, you ideally only have to look into the respective `modeling_....py` file. In our opinion, the library's code is not just a means to provide a product, *e.g.* the ability to use BERT for inference, but also as the very product that we want to improve. 
Hence, when adding a model, the user is not only the person that will use your model, but also everybody that will read, try to understand, and possibly tweak your code. With this in mind, let's go a bit deeper into the general library design. ### Overview of models To successfully add a model, it is important to understand the interaction between your model and its config, [`PreTrainedModel`], and [`PretrainedConfig`]. For exemplary purposes, we will call the model to be added to 🤗 Transformers `BrandNewBert`. Let's take a look: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/> As you can see, we do make use of inheritance in 🤗 Transformers, but we keep the level of abstraction to an absolute minimum. There are never more than two levels of abstraction for any model in the library. `BrandNewBertModel` inherits from `BrandNewBertPreTrainedModel` which in turn inherits from [`PreTrainedModel`] and that's it. As a general rule, we want to make sure that a new model only depends on [`PreTrainedModel`]. The important functionalities that are automatically provided to every new model are [`~PreTrainedModel.from_pretrained`] and [`~PreTrainedModel.save_pretrained`], which are used for serialization and deserialization. All of the other important functionalities, such as `BrandNewBertModel.forward` should be completely defined in the new `modeling_brand_new_bert.py` script. Next, we want to make sure that a model with a specific head layer, such as `BrandNewBertForMaskedLM` does not inherit from `BrandNewBertModel`, but rather uses `BrandNewBertModel` as a component that can be called in its forward pass to keep the level of abstraction low. Every new model requires a configuration class, called `BrandNewBertConfig`. This configuration is always stored as an attribute in [`PreTrainedModel`], and thus can be accessed via the `config` attribute for all classes inheriting from `BrandNewBertPreTrainedModel`: ```python model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert") model.config # model has access to its config ``` Similar to the model, the configuration inherits basic serialization and deserialization functionalities from [`PretrainedConfig`]. Note that the configuration and the model are always serialized into two different formats - the model to a *pytorch_model.bin* file and the configuration to a *config.json* file. Calling [`~PreTrainedModel.save_pretrained`] will automatically call [`~PretrainedConfig.save_pretrained`], so that both model and configuration are saved. ### Code style When coding your new model, keep in mind that Transformers is an opinionated library and we have a few quirks of our own regarding how code should be written :-) 1. The forward pass of your model should be fully written in the modeling file while being fully independent of other models in the library. If you want to reuse a block from another model, copy the code and paste it with a `# Copied from` comment on top (see [here](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160) for a good example). 2. The code should be fully understandable, even by a non-native English speaker. This means you should pick descriptive variable names and avoid abbreviations. As an example, `activation` is preferred to `act`. One-letter variable names are strongly discouraged unless it's an index in a for loop. 3. More generally we prefer longer explicit code to short magical one. 4. 
Avoid subclassing `nn.Sequential` in PyTorch but subclass `nn.Module` and write the forward pass, so that anyone using your code can quickly debug it by adding print statements or breaking points. 5. Your function signature should be type-annotated. For the rest, good variable names are way more readable and understandable than type annotations. ### Overview of tokenizers Not quite ready yet :-( This section will be added soon! ## Step-by-step recipe to add a model to 🤗 Transformers Everyone has different preferences of how to port a model so it can be very helpful for you to take a look at summaries of how other contributors ported models to Hugging Face. Here is a list of community blog posts on how to port a model: 1. [Porting GPT2 Model](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) by [Thomas](https://huggingface.co/thomwolf) 2. [Porting WMT19 MT Model](https://huggingface.co/blog/porting-fsmt) by [Stas](https://huggingface.co/stas) From experience, we can tell you that the most important things to keep in mind when adding a model are: - Don't reinvent the wheel! Most parts of the code you will add for the new 🤗 Transformers model already exist somewhere in 🤗 Transformers. Take some time to find similar, already existing models and tokenizers you can copy from. [grep](https://www.gnu.org/software/grep/) and [rg](https://github.com/BurntSushi/ripgrep) are your friends. Note that it might very well happen that your model's tokenizer is based on one model implementation, and your model's modeling code on another one. *E.g.* FSMT's modeling code is based on BART, while FSMT's tokenizer code is based on XLM. - It's more of an engineering challenge than a scientific challenge. You should spend more time on creating an efficient debugging environment than trying to understand all theoretical aspects of the model in the paper. - Ask for help, when you're stuck! Models are the core component of 🤗 Transformers so that we at Hugging Face are more than happy to help you at every step to add your model. Don't hesitate to ask if you notice you are not making progress. In the following, we try to give you a general recipe that we found most useful when porting a model to 🤗 Transformers. The following list is a summary of everything that has to be done to add a model and can be used by you as a To-Do List: ☐ (Optional) Understood the model's theoretical aspects<br> ☐ Prepared 🤗 Transformers dev environment<br> ☐ Set up debugging environment of the original repository<br> ☐ Created script that successfully runs the `forward()` pass using the original repository and checkpoint<br> ☐ Successfully added the model skeleton to 🤗 Transformers<br> ☐ Successfully converted original checkpoint to 🤗 Transformers checkpoint<br> ☐ Successfully ran `forward()` pass in 🤗 Transformers that gives identical output to original checkpoint<br> ☐ Finished model tests in 🤗 Transformers<br> ☐ Successfully added tokenizer in 🤗 Transformers<br> ☐ Run end-to-end integration tests<br> ☐ Finished docs<br> ☐ Uploaded model weights to the Hub<br> ☐ Submitted the pull request<br> ☐ (Optional) Added a demo notebook To begin with, we usually recommend to start by getting a good theoretical understanding of `BrandNewBert`. However, if you prefer to understand the theoretical aspects of the model *on-the-job*, then it is totally fine to directly dive into the `BrandNewBert`'s code-base. 
This option might suit you better, if your engineering skills are better than your theoretical skill, if you have trouble understanding `BrandNewBert`'s paper, or if you just enjoy programming much more than reading scientific papers. ### 1. (Optional) Theoretical aspects of BrandNewBert You should take some time to read *BrandNewBert's* paper, if such descriptive work exists. There might be large sections of the paper that are difficult to understand. If this is the case, this is fine - don't worry! The goal is not to get a deep theoretical understanding of the paper, but to extract the necessary information required to effectively re-implement the model in 🤗 Transformers. That being said, you don't have to spend too much time on the theoretical aspects, but rather focus on the practical ones, namely: - What type of model is *brand_new_bert*? BERT-like encoder-only model? GPT2-like decoder-only model? BART-like encoder-decoder model? Look at the [model_summary](model_summary) if you're not familiar with the differences between those. - What are the applications of *brand_new_bert*? Text classification? Text generation? Seq2Seq tasks, *e.g.,* summarization? - What is the novel feature of the model making it different from BERT/GPT-2/BART? - Which of the already existing [🤗 Transformers models](https://huggingface.co/transformers/#contents) is most similar to *brand_new_bert*? - What type of tokenizer is used? A sentencepiece tokenizer? Word piece tokenizer? Is it the same tokenizer as used for BERT or BART? After you feel like you have gotten a good overview of the architecture of the model, you might want to write to the Hugging Face team with any questions you might have. This might include questions regarding the model's architecture, its attention layer, etc. We will be more than happy to help you. ### 2. Next prepare your environment 1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the ‘Fork' button on the repository's page. This creates a copy of the code under your GitHub user account. 2. Clone your `transformers` fork to your local disk, and add the base repository as a remote: ```bash git clone https://github.com/[your Github handle]/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 3. Set up a development environment, for instance by running the following command: ```bash python -m venv .env source .env/bin/activate pip install -e ".[dev]" ``` and return to the parent directory ```bash cd .. ``` 4. We recommend adding the PyTorch version of *brand_new_bert* to Transformers. To install PyTorch, please follow the instructions on https://pytorch.org/get-started/locally/. **Note:** You don't need to have CUDA installed. Making the new model work on CPU is sufficient. 5. To port *brand_new_bert*, you will also need access to its original repository: ```bash git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git cd brand_new_bert pip install -e . ``` Now you have set up a development environment to port *brand_new_bert* to 🤗 Transformers. ### 3.-4. Run a pretrained checkpoint using the original repository At first, you will work on the original *brand_new_bert* repository. Often, the original implementation is very “researchy”. Meaning that documentation might be lacking and the code can be difficult to understand. But this should be exactly your motivation to reimplement *brand_new_bert*. 
At Hugging Face, one of our main goals is to *make people stand on the shoulders of giants* which translates here very well into taking a working model and rewriting it to make it as **accessible, user-friendly, and beautiful** as possible. This is the number-one motivation to re-implement models into 🤗 Transformers - trying to make complex new NLP technology accessible to **everybody**. You should therefore start by diving into the original repository. Successfully running the official pretrained model in the original repository is often **the most difficult** step. From our experience, it is very important to spend some time getting familiar with the original code-base. You need to figure out the following: - Where to find the pretrained weights? - How to load the pretrained weights into the corresponding model? - How to run the tokenizer independently from the model? - Trace one forward pass so that you know which classes and functions are required for a simple forward pass. Usually, you only have to reimplement those functions. - Be able to locate the important components of the model: Where is the model's class? Are there model sub-classes, *e.g.* EncoderModel, DecoderModel? Where is the self-attention layer? Are there multiple different attention layers, *e.g.* *self-attention*, *cross-attention*...? - How can you debug the model in the original environment of the repo? Do you have to add *print* statements, can you work with an interactive debugger like *ipdb*, or should you use an efficient IDE to debug the model, like PyCharm? It is very important that, before you start the porting process, you can **efficiently** debug code in the original repository! Also, remember that you are working with an open-source library, so do not hesitate to open an issue, or even a pull request in the original repository. The maintainers of this repository are most likely very happy about someone looking into their code! At this point, it is really up to you which debugging environment and strategy you prefer to use to debug the original model. We strongly advise against setting up a costly GPU environment and recommend simply working on a CPU, both when starting to dive into the original repository and also when starting to write the 🤗 Transformers implementation of the model. Only at the very end, when the model has already been successfully ported to 🤗 Transformers, should you verify that the model also works as expected on GPU. In general, there are two possible debugging environments for running the original model: - [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb) - Local Python scripts. Jupyter notebooks have the advantage that they allow for cell-by-cell execution, which can be helpful to better split logical components from one another and to have faster debugging cycles as intermediate results can be stored. Also, notebooks are often easier to share with other contributors, which might be very helpful if you want to ask the Hugging Face team for help. If you are familiar with Jupyter notebooks, we strongly recommend working with them. The obvious disadvantage of Jupyter notebooks is that if you are not used to working with them you will have to spend some time adjusting to the new programming environment, and you might not be able to use your known debugging tools anymore, like `ipdb`. 
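Whichever environment you pick, it helps to have a reusable way to capture intermediate outputs for the layer-by-layer comparison described next. Below is a minimal, self-contained sketch using PyTorch forward hooks on a stand-in module; the real model class and layer names will differ.

```python
import torch
from torch import nn

# Stand-in for the original model -- only here to illustrate the hook mechanism.
class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(32, 8)
        self.layer = nn.Linear(8, 8)

    def forward(self, input_ids):
        hidden = self.embed(input_ids)
        return self.layer(hidden)

model = TinyModel().eval()
captured = {}

def save_output(name):
    def hook(module, inputs, output):
        # Store a detached copy of this sub-module's output for later comparison.
        captured[name] = output.detach()
    return hook

# Register hooks on the sub-modules whose outputs you want to inspect.
model.embed.register_forward_hook(save_output("embeddings"))
model.layer.register_forward_hook(save_output("first_layer"))

with torch.no_grad():
    model(torch.tensor([[0, 4, 5, 2, 3, 7, 9]]))

for name, tensor in captured.items():
    print(name, tensor.shape)
```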
For each code-base, a good first step is always to load a **small** pretrained checkpoint and to be able to reproduce a single forward pass using a dummy integer vector of input IDs as an input. Such a script could look like this (in pseudocode): ```python model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = [0, 4, 5, 2, 3, 7, 9] # vector of input ids original_output = model.predict(input_ids) ``` Next, regarding the debugging strategy, there are generally a few from which to choose from: - Decompose the original model into many small testable components and run a forward pass on each of those for verification - Decompose the original model only into the original *tokenizer* and the original *model*, run a forward pass on those, and use intermediate print statements or breakpoints for verification Again, it is up to you which strategy to choose. Often, one or the other is advantageous depending on the original code base. If the original code-base allows you to decompose the model into smaller sub-components, *e.g.* if the original code-base can easily be run in eager mode, it is usually worth the effort to do so. There are some important advantages to taking the more difficult road in the beginning: - at a later stage when comparing the original model to the Hugging Face implementation, you can verify automatically for each component individually that the corresponding component of the 🤗 Transformers implementation matches instead of relying on visual comparison via print statements - it can give you some rope to decompose the big problem of porting a model into smaller problems of just porting individual components and thus structure your work better - separating the model into logical meaningful components will help you to get a better overview of the model's design and thus to better understand the model - at a later stage those component-by-component tests help you to ensure that no regression occurs as you continue changing your code [Lysandre's](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed) integration checks for ELECTRA gives a nice example of how this can be done. However, if the original code-base is very complex or only allows intermediate components to be run in a compiled mode, it might be too time-consuming or even impossible to separate the model into smaller testable sub-components. A good example is [T5's MeshTensorFlow](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow) library which is very complex and does not offer a simple way to decompose the model into its sub-components. For such libraries, one often relies on verifying print statements. No matter which strategy you choose, the recommended procedure is often the same in that you should start to debug the starting layers first and the ending layers last. It is recommended that you retrieve the output, either by print statements or sub-component functions, of the following layers in the following order: 1. Retrieve the input IDs passed to the model 2. Retrieve the word embeddings 3. Retrieve the input of the first Transformer layer 4. Retrieve the output of the first Transformer layer 5. Retrieve the output of the following n - 1 Transformer layers 6. 
Retrieve the output of the whole BrandNewBert Model. Input IDs should thereby consist of an array of integers, *e.g.* `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]`. The outputs of the following layers often consist of multi-dimensional float arrays and can look like this: ``` [[ [-0.1465, -0.6501, 0.1993, ..., 0.1451, 0.3430, 0.6024], [-0.4417, -0.5920, 0.3450, ..., -0.3062, 0.6182, 0.7132], [-0.5009, -0.7122, 0.4548, ..., -0.3662, 0.6091, 0.7648], ..., [-0.5613, -0.6332, 0.4324, ..., -0.3792, 0.7372, 0.9288], [-0.5416, -0.6345, 0.4180, ..., -0.3564, 0.6992, 0.9191], [-0.5334, -0.6403, 0.4271, ..., -0.3339, 0.6533, 0.8694]]], ``` We expect that every model added to 🤗 Transformers passes a couple of integration tests, meaning that the original model and the reimplemented version in 🤗 Transformers have to give the exact same output up to a precision of 0.001! Since it is normal that the exact same model written in different libraries can give a slightly different output depending on the library framework, we accept an error tolerance of 1e-3 (0.001). It is not enough if the model gives nearly the same output; the outputs have to be almost identical. Therefore, you will certainly compare the intermediate outputs of the 🤗 Transformers version multiple times against the intermediate outputs of the original implementation of *brand_new_bert*, in which case an **efficient** debugging environment of the original repository is absolutely important. Here is some advice to make your debugging environment as efficient as possible: - Find the best way of debugging intermediate results. Is the original repository written in PyTorch? Then you should probably take the time to write a longer script that decomposes the original model into smaller sub-components to retrieve intermediate values. Is the original repository written in TensorFlow 1? Then you might have to rely on TensorFlow print operations like [tf.print](https://www.tensorflow.org/api_docs/python/tf/print) to output intermediate values. Is the original repository written in Jax? Then make sure that the model is **not jitted** when running the forward pass, *e.g.* check out [this link](https://github.com/google/jax/issues/196). - Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debug cycle becomes. It is not efficient if your pretrained model is so big that your forward pass takes more than 10 seconds. In case only very large checkpoints are available, it might make more sense to create a dummy model in the new environment with randomly initialized weights and save those weights for comparison with the 🤗 Transformers version of your model. - Make sure you are using the easiest way of calling a forward pass in the original repository. Ideally, you want to find the function in the original repository that **only** calls a single forward pass, *i.e.* that is often called `predict`, `evaluate`, `forward` or `__call__`. You don't want to debug a function that calls `forward` multiple times, *e.g.* to generate text, like `autoregressive_sample`, `generate`. - Try to separate the tokenization from the model's *forward* pass. If the original repository shows examples where you have to input a string, then try to find out where in the forward call the string input is changed to input ids and start from this point. This might mean that you have to write a small script yourself or change the original code so that you can directly input the ids instead of an input string. 
- Make sure that the model in your debugging setup is **not** in training mode, which often causes the model to yield random outputs due to multiple dropout layers in the model. Make sure that the forward pass in your debugging environment is **deterministic** so that the dropout layers are not used. Or use *transformers.utils.set_seed* if the old and new implementations are in the same framework. The following section gives you more specific details/tips on how you can do this for *brand_new_bert*. ### 5.-14. Port BrandNewBert to 🤗 Transformers Next, you can finally start adding new code to 🤗 Transformers. Go into the clone of your 🤗 Transformers' fork: ```bash cd transformers ``` In the special case that you are adding a model whose architecture exactly matches the model architecture of an existing model you only have to add a conversion script as described in [this section](#write-a-conversion-script). In this case, you can just re-use the whole model architecture of the already existing model. Otherwise, let's start generating a new model. You have two choices here: - `transformers-cli add-new-model-like` to add a new model like an existing one - `transformers-cli add-new-model` to add a new model from our template (will look like BERT or Bart depending on the type of model you select) In both cases, you will be prompted with a questionnaire to fill the basic information of your model. The second command requires to install `cookiecutter`, you can find more information on it [here](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model). **Open a Pull Request on the main huggingface/transformers repo** Before starting to adapt the automatically generated code, now is the time to open a “Work in progress (WIP)” pull request, *e.g.* “[WIP] Add *brand_new_bert*”, in 🤗 Transformers so that you and the Hugging Face team can work side-by-side on integrating the model into 🤗 Transformers. You should do the following: 1. Create a branch with a descriptive name from your main branch ```bash git checkout -b add_brand_new_bert ``` 2. Commit the automatically generated code: ```bash git add . git commit ``` 3. Fetch and rebase to current main ```bash git fetch upstream git rebase upstream/main ``` 4. Push the changes to your account using: ```bash git push -u origin a-descriptive-name-for-my-changes ``` 5. Once you are satisfied, go to the webpage of your fork on GitHub. Click on “Pull request”. Make sure to add the GitHub handle of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified for future changes. 6. Change the PR into a draft by clicking on “Convert to draft” on the right of the GitHub pull request web page. In the following, whenever you have done some progress, don't forget to commit your work and push it to your account so that it shows in the pull request. Additionally, you should make sure to update your work with the current main from time to time by doing: ```bash git fetch upstream git merge upstream/main ``` In general, all questions you might have regarding the model or your implementation should be asked in your PR and discussed/solved in the PR. This way, the Hugging Face team will always be notified when you are committing new code or if you have a question. It is often very helpful to point the Hugging Face team to your added code so that the Hugging Face team can efficiently understand your problem or question. 
To do so, you can go to the “Files changed” tab where you see all of your changes, go to a line regarding which you want to ask a question, and click on the “+” symbol to add a comment. Whenever a question or problem has been solved, you can click on the “Resolve” button of the created comment. In the same way, the Hugging Face team will open comments when reviewing your code. We recommend asking most questions on GitHub on your PR. For some very general questions that are not very useful for the public, feel free to ping the Hugging Face team by Slack or email. **5. Adapt the generated models code for brand_new_bert** At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be found in the generated files `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` and `src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`. Now you can finally start coding :). The generated code in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` will either have the same architecture as BERT if it's an encoder-only model or BART if it's an encoder-decoder model. At this point, you should remind yourself what you've learned in the beginning about the theoretical aspects of the model: *How is the model different from BERT or BART?*". Implement those changes which often means to change the *self-attention* layer, the order of the normalization layer, etc… Again, it is often useful to look at the similar architecture of already existing models in Transformers to get a better feeling of how your model should be implemented. **Note** that at this point, you don't have to be very sure that your code is fully correct or clean. Rather, it is advised to add a first *unclean*, copy-pasted version of the original code to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` until you feel like all the necessary code is added. From our experience, it is much more efficient to quickly add a first version of the required code and improve/correct the code iteratively with the conversion script as described in the next section. The only thing that has to work at this point is that you can instantiate the 🤗 Transformers implementation of *brand_new_bert*, *i.e.* the following command should work: ```python from transformers import BrandNewBertModel, BrandNewBertConfig model = BrandNewBertModel(BrandNewBertConfig()) ``` The above command will create a model according to the default parameters as defined in `BrandNewBertConfig()` with random weights, thus making sure that the `init()` methods of all components works. **6. Write a conversion script** Next, you should write a conversion script that lets you convert the checkpoint you used to debug *brand_new_bert* in the original repository to a checkpoint compatible with your just created 🤗 Transformers implementation of *brand_new_bert*. It is not advised to write the conversion script from scratch, but rather to look through already existing conversion scripts in 🤗 Transformers for one that has been used to convert a similar model that was written in the same framework as *brand_new_bert*. Usually, it is enough to copy an already existing conversion script and slightly adapt it for your use case. Don't hesitate to ask the Hugging Face team to point you to a similar already existing conversion script for your model. 
- If you are porting a model from TensorFlow to PyTorch, a good starting point might be BERT's conversion script [here](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91) - If you are porting a model from PyTorch to PyTorch, a good starting point might be BART's conversion script [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py) In the following, we'll quickly explain how PyTorch models store layer weights and define layer names. In PyTorch, the name of a layer is defined by the name of the class attribute you give the layer. Let's define a dummy model in PyTorch, called `SimpleModel` as follows: ```python from torch import nn class SimpleModel(nn.Module): def __init__(self): super().__init__() self.dense = nn.Linear(10, 10) self.intermediate = nn.Linear(10, 10) self.layer_norm = nn.LayerNorm(10) ``` Now we can create an instance of this model definition which will fill all weights: `dense`, `intermediate`, `layer_norm` with random weights. We can print the model to see its architecture ```python model = SimpleModel() print(model) ``` This will print out the following: ``` SimpleModel( (dense): Linear(in_features=10, out_features=10, bias=True) (intermediate): Linear(in_features=10, out_features=10, bias=True) (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True) ) ``` We can see that the layer names are defined by the name of the class attribute in PyTorch. You can print out the weight values of a specific layer: ```python print(model.dense.weight.data) ``` to see that the weights were randomly initialized ``` tensor([[-0.0818, 0.2207, -0.0749, -0.0030, 0.0045, -0.1569, -0.1598, 0.0212, -0.2077, 0.2157], [ 0.1044, 0.0201, 0.0990, 0.2482, 0.3116, 0.2509, 0.2866, -0.2190, 0.2166, -0.0212], [-0.2000, 0.1107, -0.1999, -0.3119, 0.1559, 0.0993, 0.1776, -0.1950, -0.1023, -0.0447], [-0.0888, -0.1092, 0.2281, 0.0336, 0.1817, -0.0115, 0.2096, 0.1415, -0.1876, -0.2467], [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465, 0.2577, 0.0402], [ 0.1502, 0.2465, 0.2566, 0.0693, 0.2352, -0.0530, 0.1859, -0.0604, 0.2132, 0.1680], [ 0.1733, -0.2407, -0.1721, 0.1484, 0.0358, -0.0633, -0.0721, -0.0090, 0.2707, -0.2509], [-0.1173, 0.1561, 0.2945, 0.0595, -0.1996, 0.2988, -0.0802, 0.0407, 0.1829, -0.1568], [-0.1164, -0.2228, -0.0403, 0.0428, 0.1339, 0.0047, 0.1967, 0.2923, 0.0333, -0.0536], [-0.1492, -0.1616, 0.1057, 0.1950, -0.2807, -0.2710, -0.1586, 0.0739, 0.2220, 0.2358]]). ``` In the conversion script, you should fill those randomly initialized weights with the exact weights of the corresponding layer in the checkpoint. *E.g.* ```python # retrieve matching layer weights, e.g. by # recursive algorithm layer_name = "dense" pretrained_weight = array_of_dense_layer model_pointer = getattr(model, "dense") model_pointer.weight.data = torch.from_numpy(pretrained_weight) ``` While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding pretrained checkpoint weight exactly match in both **shape and name**. To do so, it is **necessary** to add assert statements for the shape and print out the names of the checkpoints weights. E.g. 
*E.g.*, you should add statements like:

```python
assert (
    model_pointer.weight.shape == pretrained_weight.shape
), f"Pointer shape of random weight {model_pointer.weight.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched"
```

Besides, you should also print out the names of both weights to make sure they match, *e.g.*:

```python
logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}")
```

If either the shape or the name doesn't match, you probably assigned the wrong checkpoint weight to a randomly initialized layer of the 🤗 Transformers implementation.

An incorrect shape is most likely due to an incorrect setting of the config parameters in `BrandNewBertConfig()` that do not exactly match those that were used for the checkpoint you want to convert. However, it could also be that PyTorch's implementation of a layer requires the weight to be transposed beforehand.

Finally, you should also check that **all** required weights are initialized and print out all checkpoint weights that were not used for initialization to make sure the model is correctly converted. It is completely normal that the conversion trials fail with either a wrong shape statement or a wrong name assignment. This is most likely because you either used incorrect parameters in `BrandNewBertConfig()`, have a wrong architecture in the 🤗 Transformers implementation, have a bug in the `init()` functions of one of the components of the 🤗 Transformers implementation, or need to transpose one of the checkpoint weights.

This step should be iterated with the previous step until all weights of the checkpoint are correctly loaded in the Transformers model. Having correctly loaded the checkpoint into the 🤗 Transformers implementation, you can then save the model under a folder of your choice `/path/to/converted/checkpoint/folder` that should then contain both a `pytorch_model.bin` file and a `config.json` file:

```python
model.save_pretrained("/path/to/converted/checkpoint/folder")
```

**7. Implement the forward pass**

Having managed to correctly load the pretrained weights into the 🤗 Transformers implementation, you should now make sure that the forward pass is correctly implemented. In [Get familiar with the original repository](#run-a-pretrained-checkpoint-using-the-original-repository), you have already created a script that runs a forward pass of the model using the original repository. Now you should write an analogous script using the 🤗 Transformers implementation instead of the original one. It should look as follows:

```python
model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]
output = model(torch.tensor([input_ids])).last_hidden_state
```

It is very likely that the 🤗 Transformers implementation and the original model implementation don't give the exact same output the very first time or that the forward pass throws an error. Don't be disappointed - it's expected! First, you should make sure that the forward pass doesn't throw any errors. It often happens that the wrong dimensions are used, leading to a *Dimensionality mismatch* error, or that the wrong data type object is used, *e.g.* `torch.long` instead of `torch.float32`. Don't hesitate to ask the Hugging Face team for help if you don't manage to solve certain errors.

The final part to make sure the 🤗 Transformers implementation works correctly is to ensure that the outputs are equivalent to a precision of `1e-3`.
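A minimal comparison could look like the following sketch, reusing `model`, `input_ids`, and `output` from the snippet above. The file name is a placeholder: it assumes you saved the original implementation's output beforehand, *e.g.* as a NumPy array.

```python
import numpy as np
import torch

# Hypothetical file - assumes the original repository's output was saved earlier, e.g. with np.save(...)
original_output = torch.from_numpy(np.load("original_output.npy")).float()

print("Shapes:", original_output.shape, output.shape)
print("Max absolute difference:", (original_output - output).abs().max())
assert torch.allclose(original_output, output, atol=1e-3), "Outputs differ by more than 1e-3"
```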
First, you should ensure that the output shapes are identical, *i.e.* `outputs.shape` should yield the same value for the script of the 🤗 Transformers implementation and the original implementation. Next, you should make sure that the output values are identical as well. This is one of the most difficult parts of adding a new model. Common reasons why the outputs are not identical are:

- Some layers were not added, *i.e.* an *activation* layer was not added, or the residual connection was forgotten
- The word embedding matrix was not tied
- The wrong positional embeddings are used because the original implementation uses an offset
- Dropout is applied during the forward pass. To fix this, make sure *model.training* is *False* and that no dropout layer is falsely activated during the forward pass, *i.e.* pass *self.training* to [PyTorch's functional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)

The best way to fix the problem is usually to look at the forward pass of the original implementation and the 🤗 Transformers implementation side-by-side and check if there are any differences. Ideally, you should debug/print out intermediate outputs of both implementations of the forward pass to find the exact position in the network where the 🤗 Transformers implementation shows a different output than the original implementation. First, make sure that the hard-coded `input_ids` in both scripts are identical. Next, verify that the outputs of the first transformation of the `input_ids` (usually the word embeddings) are identical. And then work your way up to the very last layer of the network. At some point, you will notice a difference between the two implementations, which should point you to the bug in the 🤗 Transformers implementation. From our experience, a simple and efficient way is to add many print statements in both the original implementation and the 🤗 Transformers implementation, at the same positions in the network respectively, and to successively remove print statements showing the same values for intermediate representations.

When you're confident that both implementations yield the same output, verified with `torch.allclose(original_output, output, atol=1e-3)`, you're done with the most difficult part! Congratulations - the work left to be done should be a cakewalk 😊.

**8. Adding all necessary model tests**

At this point, you have successfully added a new model. However, it is very much possible that the model does not yet fully comply with the required design. To make sure the implementation is fully compatible with 🤗 Transformers, all common tests should pass. The Cookiecutter should have automatically added a test file for your model, probably under `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`. Run this test file to verify that all common tests pass:

```bash
pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py
```

Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that

- a) The community can easily understand your work by looking at specific tests of *brand_new_bert*
- b) Future changes to your model will not break any important feature of the model.

At first, integration tests should be added. Those integration tests essentially do the same as the debugging scripts you used earlier to implement the model to 🤗 Transformers.
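As a rough illustration, such an integration test might look like the sketch below. The checkpoint path and the expected values are placeholders that you would replace with real numbers obtained from the original implementation.

```python
import unittest

import torch

from transformers import BrandNewBertModel


class BrandNewBertModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        # Placeholder path - use the checkpoint you converted earlier.
        model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
        input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]])

        with torch.no_grad():
            output = model(input_ids).last_hidden_state

        # Placeholder values - copy the expected slice from the original implementation's output.
        expected_slice = torch.tensor(
            [[[-0.1465, -0.6501, 0.1993], [-0.4417, -0.5920, 0.3450], [-0.5009, -0.7122, 0.4548]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3))
```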
A template of those model tests is already added by the Cookiecutter, called `BrandNewBertModelIntegrationTests` and only has to be filled out by you. To ensure that those tests are passing, run ```bash RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests ``` <Tip> In case you are using Windows, you should replace `RUN_SLOW=1` with `SET RUN_SLOW=1` </Tip> Second, all features that are special to *brand_new_bert* should be tested additionally in a separate test under `BrandNewBertModelTester`/``BrandNewBertModelTest`. This part is often forgotten but is extremely useful in two ways: - It helps to transfer the knowledge you have acquired during the model addition to the community by showing how the special features of *brand_new_bert* should work. - Future contributors can quickly test changes to the model by running those special tests. **9. Implement the tokenizer** Next, we should add the tokenizer of *brand_new_bert*. Usually, the tokenizer is equivalent or very similar to an already existing tokenizer of 🤗 Transformers. It is very important to find/extract the original tokenizer file and to manage to load this file into the 🤗 Transformers' implementation of the tokenizer. To ensure that the tokenizer works correctly, it is recommended to first create a script in the original repository that inputs a string and returns the `input_ids``. It could look similar to this (in pseudo-code): ```python input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = model.tokenize(input_str) ``` You might have to take a deeper look again into the original repository to find the correct tokenizer function or you might even have to do changes to your clone of the original repository to only output the `input_ids`. Having written a functional tokenization script that uses the original repository, an analogous script for 🤗 Transformers should be created. It should look similar to this: ```python from transformers import BrandNewBertTokenizer input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/") input_ids = tokenizer(input_str).input_ids ``` When both `input_ids` yield the same values, as a final step a tokenizer test file should also be added. Analogous to the modeling test files of *brand_new_bert*, the tokenization test files of *brand_new_bert* should contain a couple of hard-coded integration tests. **10. Run End-to-end integration tests** Having added the tokenizer, you should also add a couple of end-to-end integration tests using both the model and the tokenizer to `tests/models/brand_new_bert/test_modeling_brand_new_bert.py` in 🤗 Transformers. Such a test should show on a meaningful text-to-text sample that the 🤗 Transformers implementation works as expected. A meaningful text-to-text sample can include *e.g.* a source-to-target-translation pair, an article-to-summary pair, a question-to-answer pair, etc… If none of the ported checkpoints has been fine-tuned on a downstream task it is enough to simply rely on the model tests. In a final step to ensure that the model is fully functional, it is advised that you also run all tests on GPU. 
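As an illustration, the core of such an end-to-end check could be sketched as follows. The head class, checkpoint paths, and example text are purely hypothetical and would need to be adapted to *brand_new_bert*; the sketch is written so it can also be run on GPU when one is available.

```python
import torch

from transformers import BrandNewBertForConditionalGeneration, BrandNewBertTokenizer  # hypothetical classes

device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/")
model = BrandNewBertForConditionalGeneration.from_pretrained("/path/to/converted/checkpoint/folder").to(device)

# Placeholder input - in a real test you would compare the generated text against a hard-coded expected string.
article = "A long news article that the fine-tuned checkpoint is able to summarize ..."
inputs = tokenizer(article, return_tensors="pt").to(device)

summary_ids = model.generate(**inputs, max_length=32)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```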
It can happen that you forgot to add some `.to(self.device)` statements to internal tensors of the model, which in such a test would show up as an error. In case you have no access to a GPU, the Hugging Face team can take care of running those tests for you.

**11. Add Docstring**

Now, all the necessary functionality for *brand_new_bert* is added - you're almost done! The only thing left to add is a nice docstring and a doc page. The Cookiecutter should have added a template file called `docs/source/model_doc/brand_new_bert.mdx` that you should fill out. Users of your model will usually first look at this page before using your model. Hence, the documentation must be understandable and concise. It is very useful for the community to add some *Tips* to show how the model should be used. Don't hesitate to ping the Hugging Face team regarding the docstrings.

Next, make sure that the docstring added to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` is correct and includes all necessary inputs and outputs. We have a detailed guide about writing documentation and our docstring format [here](writing-documentation). It is always good to remind oneself that documentation should be treated at least as carefully as the code in 🤗 Transformers since the documentation is usually the first contact point of the community with the model.

**Code refactor**

Great, now you have added all the necessary code for *brand_new_bert*. At this point, you should correct any potentially incorrect code style by running:

```bash
make style
```

and verify that your coding style passes the quality check:

```bash
make quality
```

There are a couple of other very strict design tests in 🤗 Transformers that might still be failing, which show up in the tests of your pull request. This is often because of some missing information in the docstring or some incorrect naming. The Hugging Face team will surely help you if you're stuck here.

Lastly, it is always a good idea to refactor one's code after having ensured that the code works correctly. With all tests passing, now it's a good time to go over the added code again and do some refactoring.

You have now finished the coding part, congratulations! 🎉 You are awesome! 😎

**12. Upload the models to the model hub**

In this final part, you should convert and upload all checkpoints to the model hub and add a model card for each uploaded model checkpoint. You can get familiar with the hub functionalities by reading our [Model sharing and uploading Page](model_sharing). You should work alongside the Hugging Face team here to decide on a fitting name for each checkpoint and to get the required access rights to be able to upload the model under the author's organization of *brand_new_bert*. The `push_to_hub` method, present in all models in `transformers`, is a quick and efficient way to push your checkpoint to the hub. A little snippet is pasted below:

```python
brand_new_bert.push_to_hub("brand_new_bert")
# Uncomment the following line to push to an organization.
# brand_new_bert.push_to_hub("<organization>/brand_new_bert")
```

It is worth spending some time to create fitting model cards for each checkpoint. The model cards should highlight the specific characteristics of this particular checkpoint, *e.g.* on which dataset was the checkpoint pretrained/fine-tuned? For which downstream task should the model be used? The model card should also include some code on how to correctly use the model.
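For example, a usage snippet in the model card could look roughly like this; the class names and checkpoint identifier are placeholders for the real names of *brand_new_bert*:

```python
from transformers import BrandNewBertModel, BrandNewBertTokenizer  # placeholder class names

tokenizer = BrandNewBertTokenizer.from_pretrained("author/brand_new_bert")  # placeholder checkpoint
model = BrandNewBertModel.from_pretrained("author/brand_new_bert")

inputs = tokenizer("Hello, my dog is cute.", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)
```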
**13. (Optional) Add notebook**

It is very helpful to add a notebook that showcases in detail how *brand_new_bert* can be used for inference and/or fine-tuned on a downstream task. This is not mandatory to merge your PR, but very useful for the community.

**14. Submit your finished PR**

You're done programming now and can move to the last step, which is getting your PR merged into main. Usually, the Hugging Face team should have helped you already at this point, but it is worth taking some time to give your finished PR a nice description and possibly add comments to your code, if you want to point out certain design choices to your reviewer.

### Share your work!!

Now, it's time to get some credit from the community for your work! Completing a model addition is a major contribution to Transformers and the whole NLP community. Your code and the ported pre-trained models will certainly be used by hundreds and possibly even thousands of developers and researchers. You should be proud of your work and share your achievement with the community.

**You have made another model that is super easy to access for everyone in the community! 🤯**
To do so, you can go to the “Files changed” tab where you see all of your changes, go to a line regarding which you want to ask a question, and click on the “+” symbol to add a comment. Whenever a question or problem has been solved, you can click on the “Resolve” button of the created comment. In the same way, the Hugging Face team will open comments when reviewing your code. We recommend asking most questions on GitHub on your PR. For some very general questions that are not very useful for the public, feel free to ping the Hugging Face team by Slack or email. **5. Adapt the generated models code for brand_new_bert** At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be found in the generated files `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` and `src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`. Now you can finally start coding :). The generated code in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` will either have the same architecture as BERT if it's an encoder-only model or BART if it's an encoder-decoder model. At this point, you should remind yourself what you've learned in the beginning about the theoretical aspects of the model: *How is the model different from BERT or BART?*". Implement those changes which often means to change the *self-attention* layer, the order of the normalization layer, etc… Again, it is often useful to look at the similar architecture of already existing models in Transformers to get a better feeling of how your model should be implemented. **Note** that at this point, you don't have to be very sure that your code is fully correct or clean. Rather, it is advised to add a first *unclean*, copy-pasted version of the original code to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` until you feel like all the necessary code is added. From our experience, it is much more efficient to quickly add a first version of the required code and improve/correct the code iteratively with the conversion script as described in the next section. The only thing that has to work at this point is that you can instantiate the 🤗 Transformers implementation of *brand_new_bert*, *i.e.* the following command should work: ```python from transformers import BrandNewBertModel, BrandNewBertConfig model = BrandNewBertModel(BrandNewBertConfig()) ``` The above command will create a model according to the default parameters as defined in `BrandNewBertConfig()` with random weights, thus making sure that the `init()` methods of all components works. **6. Write a conversion script** Next, you should write a conversion script that lets you convert the checkpoint you used to debug *brand_new_bert* in the original repository to a checkpoint compatible with your just created 🤗 Transformers implementation of *brand_new_bert*. It is not advised to write the conversion script from scratch, but rather to look through already existing conversion scripts in 🤗 Transformers for one that has been used to convert a similar model that was written in the same framework as *brand_new_bert*. Usually, it is enough to copy an already existing conversion script and slightly adapt it for your use case. Don't hesitate to ask the Hugging Face team to point you to a similar already existing conversion script for your model. 
- If you are porting a model from TensorFlow to PyTorch, a good starting point might be BERT's conversion script [here](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91) - If you are porting a model from PyTorch to PyTorch, a good starting point might be BART's conversion script [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py) In the following, we'll quickly explain how PyTorch models store layer weights and define layer names. In PyTorch, the name of a layer is defined by the name of the class attribute you give the layer. Let's define a dummy model in PyTorch, called `SimpleModel` as follows: ```python from torch import nn class SimpleModel(nn.Module): def __init__(self): super().__init__() self.dense = nn.Linear(10, 10) self.intermediate = nn.Linear(10, 10) self.layer_norm = nn.LayerNorm(10) ``` Now we can create an instance of this model definition which will fill all weights: `dense`, `intermediate`, `layer_norm` with random weights. We can print the model to see its architecture ```python model = SimpleModel() print(model) ``` This will print out the following: ``` SimpleModel( (dense): Linear(in_features=10, out_features=10, bias=True) (intermediate): Linear(in_features=10, out_features=10, bias=True) (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True) ) ``` We can see that the layer names are defined by the name of the class attribute in PyTorch. You can print out the weight values of a specific layer: ```python print(model.dense.weight.data) ``` to see that the weights were randomly initialized ``` tensor([[-0.0818, 0.2207, -0.0749, -0.0030, 0.0045, -0.1569, -0.1598, 0.0212, -0.2077, 0.2157], [ 0.1044, 0.0201, 0.0990, 0.2482, 0.3116, 0.2509, 0.2866, -0.2190, 0.2166, -0.0212], [-0.2000, 0.1107, -0.1999, -0.3119, 0.1559, 0.0993, 0.1776, -0.1950, -0.1023, -0.0447], [-0.0888, -0.1092, 0.2281, 0.0336, 0.1817, -0.0115, 0.2096, 0.1415, -0.1876, -0.2467], [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465, 0.2577, 0.0402], [ 0.1502, 0.2465, 0.2566, 0.0693, 0.2352, -0.0530, 0.1859, -0.0604, 0.2132, 0.1680], [ 0.1733, -0.2407, -0.1721, 0.1484, 0.0358, -0.0633, -0.0721, -0.0090, 0.2707, -0.2509], [-0.1173, 0.1561, 0.2945, 0.0595, -0.1996, 0.2988, -0.0802, 0.0407, 0.1829, -0.1568], [-0.1164, -0.2228, -0.0403, 0.0428, 0.1339, 0.0047, 0.1967, 0.2923, 0.0333, -0.0536], [-0.1492, -0.1616, 0.1057, 0.1950, -0.2807, -0.2710, -0.1586, 0.0739, 0.2220, 0.2358]]). ``` In the conversion script, you should fill those randomly initialized weights with the exact weights of the corresponding layer in the checkpoint. *E.g.* ```python # retrieve matching layer weights, e.g. by # recursive algorithm layer_name = "dense" pretrained_weight = array_of_dense_layer model_pointer = getattr(model, "dense") model_pointer.weight.data = torch.from_numpy(pretrained_weight) ``` While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding pretrained checkpoint weight exactly match in both **shape and name**. To do so, it is **necessary** to add assert statements for the shape and print out the names of the checkpoints weights. E.g. 
you should add statements like: ```python assert ( model_pointer.weight.shape == pretrained_weight.shape ), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched" ``` Besides, you should also print out the names of both weights to make sure they match, *e.g.* ```python logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}") ``` If either the shape or the name doesn't match, you probably assigned the wrong checkpoint weight to a randomly initialized layer of the 🤗 Transformers implementation. An incorrect shape is most likely due to an incorrect setting of the config parameters in `BrandNewBertConfig()` that do not exactly match those that were used for the checkpoint you want to convert. However, it could also be that PyTorch's implementation of a layer requires the weight to be transposed beforehand. Finally, you should also check that **all** required weights are initialized and print out all checkpoint weights that were not used for initialization to make sure the model is correctly converted. It is completely normal, that the conversion trials fail with either a wrong shape statement or wrong name assignment. This is most likely because either you used incorrect parameters in `BrandNewBertConfig()`, have a wrong architecture in the 🤗 Transformers implementation, you have a bug in the `init()` functions of one of the components of the 🤗 Transformers implementation or you need to transpose one of the checkpoint weights. This step should be iterated with the previous step until all weights of the checkpoint are correctly loaded in the Transformers model. Having correctly loaded the checkpoint into the 🤗 Transformers implementation, you can then save the model under a folder of your choice `/path/to/converted/checkpoint/folder` that should then contain both a `pytorch_model.bin` file and a `config.json` file: ```python model.save_pretrained("/path/to/converted/checkpoint/folder") ``` **7. Implement the forward pass** Having managed to correctly load the pretrained weights into the 🤗 Transformers implementation, you should now make sure that the forward pass is correctly implemented. In [Get familiar with the original repository](#run-a-pretrained-checkpoint-using-the-original-repository), you have already created a script that runs a forward pass of the model using the original repository. Now you should write an analogous script using the 🤗 Transformers implementation instead of the original one. It should look as follows: ```python model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder") input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19] output = model(input_ids).last_hidden_states ``` It is very likely that the 🤗 Transformers implementation and the original model implementation don't give the exact same output the very first time or that the forward pass throws an error. Don't be disappointed - it's expected! First, you should make sure that the forward pass doesn't throw any errors. It often happens that the wrong dimensions are used leading to a *Dimensionality mismatch* error or that the wrong data type object is used, *e.g.* `torch.long` instead of `torch.float32`. Don't hesitate to ask the Hugging Face team for help, if you don't manage to solve certain errors. The final part to make sure the 🤗 Transformers implementation works correctly is to ensure that the outputs are equivalent to a precision of `1e-3`. 
First, you should ensure that the output shapes are identical, *i.e.* `outputs.shape` should yield the same value for the script of the 🤗 Transformers implementation and the original implementation. Next, you should make sure that the output values are identical as well. This one of the most difficult parts of adding a new model. Common mistakes why the outputs are not identical are: - Some layers were not added, *i.e.* an *activation* layer was not added, or the residual connection was forgotten - The word embedding matrix was not tied - The wrong positional embeddings are used because the original implementation uses on offset - Dropout is applied during the forward pass. To fix this make sure *model.training is False* and that no dropout layer is falsely activated during the forward pass, *i.e.* pass *self.training* to [PyTorch's functional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout) The best way to fix the problem is usually to look at the forward pass of the original implementation and the 🤗 Transformers implementation side-by-side and check if there are any differences. Ideally, you should debug/print out intermediate outputs of both implementations of the forward pass to find the exact position in the network where the 🤗 Transformers implementation shows a different output than the original implementation. First, make sure that the hard-coded `input_ids` in both scripts are identical. Next, verify that the outputs of the first transformation of the `input_ids` (usually the word embeddings) are identical. And then work your way up to the very last layer of the network. At some point, you will notice a difference between the two implementations, which should point you to the bug in the 🤗 Transformers implementation. From our experience, a simple and efficient way is to add many print statements in both the original implementation and 🤗 Transformers implementation, at the same positions in the network respectively, and to successively remove print statements showing the same values for intermediate presentations. When you're confident that both implementations yield the same output, verifying the outputs with `torch.allclose(original_output, output, atol=1e-3)`, you're done with the most difficult part! Congratulations - the work left to be done should be a cakewalk 😊. **8. Adding all necessary model tests** At this point, you have successfully added a new model. However, it is very much possible that the model does not yet fully comply with the required design. To make sure, the implementation is fully compatible with 🤗 Transformers, all common tests should pass. The Cookiecutter should have automatically added a test file for your model, probably under the same `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`. Run this test file to verify that all common tests pass: ```bash pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py ``` Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that - a) The community can easily understand your work by looking at specific tests of *brand_new_bert* - b) Future changes to your model will not break any important feature of the model. At first, integration tests should be added. Those integration tests essentially do the same as the debugging scripts you used earlier to implement the model to 🤗 Transformers. 
A template of those model tests is already added by the Cookiecutter, called `BrandNewBertModelIntegrationTests` and only has to be filled out by you. To ensure that those tests are passing, run ```bash RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests ``` <Tip> In case you are using Windows, you should replace `RUN_SLOW=1` with `SET RUN_SLOW=1` </Tip> Second, all features that are special to *brand_new_bert* should be tested additionally in a separate test under `BrandNewBertModelTester`/``BrandNewBertModelTest`. This part is often forgotten but is extremely useful in two ways: - It helps to transfer the knowledge you have acquired during the model addition to the community by showing how the special features of *brand_new_bert* should work. - Future contributors can quickly test changes to the model by running those special tests. **9. Implement the tokenizer** Next, we should add the tokenizer of *brand_new_bert*. Usually, the tokenizer is equivalent or very similar to an already existing tokenizer of 🤗 Transformers. It is very important to find/extract the original tokenizer file and to manage to load this file into the 🤗 Transformers' implementation of the tokenizer. To ensure that the tokenizer works correctly, it is recommended to first create a script in the original repository that inputs a string and returns the `input_ids``. It could look similar to this (in pseudo-code): ```python input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = model.tokenize(input_str) ``` You might have to take a deeper look again into the original repository to find the correct tokenizer function or you might even have to do changes to your clone of the original repository to only output the `input_ids`. Having written a functional tokenization script that uses the original repository, an analogous script for 🤗 Transformers should be created. It should look similar to this: ```python from transformers import BrandNewBertTokenizer input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/") input_ids = tokenizer(input_str).input_ids ``` When both `input_ids` yield the same values, as a final step a tokenizer test file should also be added. Analogous to the modeling test files of *brand_new_bert*, the tokenization test files of *brand_new_bert* should contain a couple of hard-coded integration tests. **10. Run End-to-end integration tests** Having added the tokenizer, you should also add a couple of end-to-end integration tests using both the model and the tokenizer to `tests/models/brand_new_bert/test_modeling_brand_new_bert.py` in 🤗 Transformers. Such a test should show on a meaningful text-to-text sample that the 🤗 Transformers implementation works as expected. A meaningful text-to-text sample can include *e.g.* a source-to-target-translation pair, an article-to-summary pair, a question-to-answer pair, etc… If none of the ported checkpoints has been fine-tuned on a downstream task it is enough to simply rely on the model tests. In a final step to ensure that the model is fully functional, it is advised that you also run all tests on GPU. 
In a final step to ensure that the model is fully functional, it is advised that you also run all tests on GPU. It can happen that you forgot to add some `.to(self.device)` statements to internal tensors of the model, which would then show up as an error in such a test. In case you have no access to a GPU, the Hugging Face team can take care of running those tests for you. **11. Add Docstring** Now, all the necessary functionality for *brand_new_bert* is added - you're almost done! The only thing left to add is a nice docstring and a doc page. The Cookiecutter should have added a template file called `docs/source/model_doc/brand_new_bert.mdx` that you should fill out. Users of your model will usually first look at this page before using your model. Hence, the documentation must be understandable and concise. It is very useful for the community to add some *Tips* to show how the model should be used. Don't hesitate to ping the Hugging Face team regarding the docstrings. Next, make sure that the docstring added to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` is correct and includes all necessary inputs and outputs. We have a detailed guide about writing documentation and our docstring format [here](writing-documentation). It is always good to remind oneself that documentation should be treated at least as carefully as the code in 🤗 Transformers since the documentation is usually the first contact point of the community with the model. **Code refactor** Great, now you have added all the necessary code for *brand_new_bert*. At this point, you should correct any potential incorrect code style by running: ```bash make style ``` and verify that your coding style passes the quality check: ```bash make quality ``` There are a couple of other very strict design tests in 🤗 Transformers that might still be failing, which will show up in the tests of your pull request. This is often because of some missing information in the docstring or some incorrect naming. The Hugging Face team will surely help you if you're stuck here. Lastly, it is always a good idea to refactor one's code after having ensured that the code works correctly. With all tests passing, now is a good time to go over the added code again and do some refactoring. You have now finished the coding part, congratulations! 🎉 You are Awesome! 😎 **12. Upload the models to the model hub** In this final part, you should convert and upload all checkpoints to the model hub and add a model card for each uploaded model checkpoint. You can get familiar with the hub functionalities by reading our [Model sharing and uploading Page](model_sharing). You should work alongside the Hugging Face team here to decide on a fitting name for each checkpoint and to get the required access rights to be able to upload the model under the author's organization of *brand_new_bert*. The `push_to_hub` method, present in all models in `transformers`, is a quick and efficient way to push your checkpoint to the hub. A little snippet is pasted below: ```python brand_new_bert.push_to_hub("brand_new_bert") # Uncomment the following line to push to an organization. # brand_new_bert.push_to_hub("<organization>/brand_new_bert") ``` It is worth spending some time to create fitting model cards for each checkpoint. The model cards should highlight the specific characteristics of this particular checkpoint, *e.g.*: Which dataset was the checkpoint pretrained/fine-tuned on? Which downstream task should the model be used for? The model card should also include some code showing how to correctly use the model.
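As a rough sketch, the usage example in such a model card could be as simple as the following, where the checkpoint name under the author's organization is a placeholder:

```python
import torch

from transformers import BrandNewBertModel, BrandNewBertTokenizer

# Placeholder checkpoint name - replace with the checkpoint uploaded in this step.
tokenizer = BrandNewBertTokenizer.from_pretrained("<organization>/brand_new_bert")
model = BrandNewBertModel.from_pretrained("<organization>/brand_new_bert")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

last_hidden_state = outputs.last_hidden_state
```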
**13. (Optional) Add notebook** It is very helpful to add a notebook that showcases in detail how *brand_new_bert* can be used for inference and/or fine-tuned on a downstream task. This is not mandatory to merge your PR, but very useful for the community. **14. Submit your finished PR** You're done programming now and can move on to the last step, which is getting your PR merged into main. Usually, the Hugging Face team should have helped you already at this point, but it is worth taking some time to give your finished PR a nice description and to add comments to your code if you want to point out certain design choices to your reviewer. ### Share your work!! Now, it's time to get some credit from the community for your work! Completing a model addition is a major contribution to Transformers and the whole NLP community. Your code and the ported pre-trained models will certainly be used by hundreds and possibly even thousands of developers and researchers. You should be proud of your work and share your achievement with the community. **You have made another model that is super easy to access for everyone in the community! 🤯**
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/models/retribert/__init__.py
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/models/splinter/test_modeling_splinter.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Splinter model. """ import copy import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import SplinterConfig, SplinterForPreTraining, SplinterForQuestionAnswering, SplinterModel from transformers.models.splinter.modeling_splinter import SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST class SplinterModelTester: def __init__( self, parent, batch_size=13, num_questions=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, question_token_id=1, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_questions = num_questions self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.question_token_id = question_token_id self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids[:, 1] = self.question_token_id input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) start_positions = None end_positions = None question_positions = None if self.use_labels: start_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) end_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) question_positions = ids_tensor([self.batch_size, self.num_questions], self.num_labels) config = SplinterConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, 
num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, question_token_id=self.question_token_id, ) return (config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions[:, 0], end_positions=end_positions[:, 0], ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class SplinterModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( SplinterModel, SplinterForQuestionAnswering, SplinterForPreTraining, ) if is_torch_available() else () ) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if issubclass(model_class, SplinterForPreTraining): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["question_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) elif issubclass(model_class, SplinterForQuestionAnswering): inputs_dict["start_positions"] = 
torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = SplinterModelTester(self) self.config_tester = ConfigTester(self, config_class=SplinterConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): if isinstance(model, SplinterForPreTraining): with self.assertRaises(TypeError): # question_positions must not be None. model(**inputs)[0] else: model(**inputs)[0] @slow def test_model_from_pretrained(self): for model_name in SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SplinterModel.from_pretrained(model_name) self.assertIsNotNone(model) # overwrite from common since `SplinterForPreTraining` could contain different number of question tokens in inputs. # When the batch is distributed to multiple devices, each replica could get different values for the maximal number # of question tokens (see `SplinterForPreTraining._prepare_question_positions()`), and the model returns different # shape along dimension 1 (i.e. `num_questions`) that could not be combined into a single tensor as an output. @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): from torch import nn config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) # move input tensors to cuda:O for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: # Skip this case since it will fail sometimes, as described above. 
if model_class == SplinterForPreTraining: continue model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = nn.DataParallel(model) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch class SplinterModelIntegrationTest(unittest.TestCase): @slow def test_splinter_question_answering(self): model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] Brad was born in [QUESTION] . He returned to the United Kingdom later . [SEP]" # Output should be the span "the United Kingdom" input_ids = torch.tensor( [[101, 7796, 1108, 1255, 1107, 104, 119, 1124, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) output = model(input_ids) expected_shape = torch.Size((1, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits), 10) self.assertEqual(torch.argmax(output.end_logits), 12) @slow def test_splinter_pretraining(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) question_positions = torch.tensor([[1, 5]], dtype=torch.long) output = model(input_ids, question_positions=question_positions) expected_shape = torch.Size((1, 2, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.end_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.start_logits[0, 1]), 10) self.assertEqual(torch.argmax(output.end_logits[0, 1]), 12) @slow def test_splinter_pretraining_loss_requires_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) with self.assertRaises(TypeError): model( input_ids, start_positions=start_positions, end_positions=end_positions, ) @slow def test_splinter_pretraining_loss(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . 
[SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10], [7, 10]], dtype=torch.long) end_positions = torch.tensor([[7, 12], [7, 12]], dtype=torch.long) question_positions = torch.tensor([[1, 5], [1, 5]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.assertAlmostEqual(output.loss.item(), 0.0024, 4) @slow def test_splinter_pretraining_loss_with_padding(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) question_positions = torch.tensor([[1, 5]], dtype=torch.long) start_positions_with_padding = torch.tensor([[7, 10, 0]], dtype=torch.long) end_positions_with_padding = torch.tensor([7, 12, 0], dtype=torch.long) question_positions_with_padding = torch.tensor([[1, 5, 0]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) output_with_padding = model( input_ids, start_positions=start_positions_with_padding, end_positions=end_positions_with_padding, question_positions=question_positions_with_padding, ) self.assertAlmostEqual(output.loss.item(), output_with_padding.loss.item(), 4) # Note that the original code uses 0 to denote padded question tokens # and their start and end positions. As the pad_token_id of the model's # config is used for the losse's ignore_index in SplinterForPreTraining, # we add this test to ensure anybody making changes to the default # value of the config, will be aware of the implication. self.assertEqual(model.config.pad_token_id, 0) @slow def test_splinter_pretraining_prepare_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") input_ids = torch.tensor( [ [101, 104, 1, 2, 104, 3, 4, 102], [101, 1, 104, 2, 104, 3, 104, 102], [101, 1, 2, 104, 104, 3, 4, 102], [101, 1, 2, 3, 4, 5, 104, 102], ] ) question_positions = torch.tensor([[1, 4, 0], [2, 4, 6], [3, 4, 0], [6, 0, 0]], dtype=torch.long) output_without_positions = model(input_ids) output_with_positions = model(input_ids, question_positions=question_positions) self.assertTrue((output_without_positions.start_logits == output_with_positions.start_logits).all()) self.assertTrue((output_without_positions.end_logits == output_with_positions.end_logits).all())
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Splinter model. """ import copy import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import SplinterConfig, SplinterForPreTraining, SplinterForQuestionAnswering, SplinterModel from transformers.models.splinter.modeling_splinter import SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST class SplinterModelTester: def __init__( self, parent, batch_size=13, num_questions=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, question_token_id=1, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_questions = num_questions self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.question_token_id = question_token_id self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids[:, 1] = self.question_token_id input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) start_positions = None end_positions = None question_positions = None if self.use_labels: start_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) end_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) question_positions = ids_tensor([self.batch_size, self.num_questions], self.num_labels) config = SplinterConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, 
num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, question_token_id=self.question_token_id, ) return (config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions[:, 0], end_positions=end_positions[:, 0], ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class SplinterModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( SplinterModel, SplinterForQuestionAnswering, SplinterForPreTraining, ) if is_torch_available() else () ) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if issubclass(model_class, SplinterForPreTraining): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["question_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) elif issubclass(model_class, SplinterForQuestionAnswering): inputs_dict["start_positions"] = 
torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = SplinterModelTester(self) self.config_tester = ConfigTester(self, config_class=SplinterConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): if isinstance(model, SplinterForPreTraining): with self.assertRaises(TypeError): # question_positions must not be None. model(**inputs)[0] else: model(**inputs)[0] @slow def test_model_from_pretrained(self): for model_name in SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SplinterModel.from_pretrained(model_name) self.assertIsNotNone(model) # overwrite from common since `SplinterForPreTraining` could contain different number of question tokens in inputs. # When the batch is distributed to multiple devices, each replica could get different values for the maximal number # of question tokens (see `SplinterForPreTraining._prepare_question_positions()`), and the model returns different # shape along dimension 1 (i.e. `num_questions`) that could not be combined into a single tensor as an output. @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): from torch import nn config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) # move input tensors to cuda:O for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: # Skip this case since it will fail sometimes, as described above. 
if model_class == SplinterForPreTraining: continue model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = nn.DataParallel(model) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch class SplinterModelIntegrationTest(unittest.TestCase): @slow def test_splinter_question_answering(self): model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] Brad was born in [QUESTION] . He returned to the United Kingdom later . [SEP]" # Output should be the span "the United Kingdom" input_ids = torch.tensor( [[101, 7796, 1108, 1255, 1107, 104, 119, 1124, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) output = model(input_ids) expected_shape = torch.Size((1, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits), 10) self.assertEqual(torch.argmax(output.end_logits), 12) @slow def test_splinter_pretraining(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) question_positions = torch.tensor([[1, 5]], dtype=torch.long) output = model(input_ids, question_positions=question_positions) expected_shape = torch.Size((1, 2, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.end_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.start_logits[0, 1]), 10) self.assertEqual(torch.argmax(output.end_logits[0, 1]), 12) @slow def test_splinter_pretraining_loss_requires_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) with self.assertRaises(TypeError): model( input_ids, start_positions=start_positions, end_positions=end_positions, ) @slow def test_splinter_pretraining_loss(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . 
[SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10], [7, 10]], dtype=torch.long) end_positions = torch.tensor([[7, 12], [7, 12]], dtype=torch.long) question_positions = torch.tensor([[1, 5], [1, 5]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.assertAlmostEqual(output.loss.item(), 0.0024, 4) @slow def test_splinter_pretraining_loss_with_padding(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) question_positions = torch.tensor([[1, 5]], dtype=torch.long) start_positions_with_padding = torch.tensor([[7, 10, 0]], dtype=torch.long) end_positions_with_padding = torch.tensor([7, 12, 0], dtype=torch.long) question_positions_with_padding = torch.tensor([[1, 5, 0]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) output_with_padding = model( input_ids, start_positions=start_positions_with_padding, end_positions=end_positions_with_padding, question_positions=question_positions_with_padding, ) self.assertAlmostEqual(output.loss.item(), output_with_padding.loss.item(), 4) # Note that the original code uses 0 to denote padded question tokens # and their start and end positions. As the pad_token_id of the model's # config is used for the losse's ignore_index in SplinterForPreTraining, # we add this test to ensure anybody making changes to the default # value of the config, will be aware of the implication. self.assertEqual(model.config.pad_token_id, 0) @slow def test_splinter_pretraining_prepare_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") input_ids = torch.tensor( [ [101, 104, 1, 2, 104, 3, 4, 102], [101, 1, 104, 2, 104, 3, 104, 102], [101, 1, 2, 104, 104, 3, 4, 102], [101, 1, 2, 3, 4, 5, 104, 102], ] ) question_positions = torch.tensor([[1, 4, 0], [2, 4, 6], [3, 4, 0], [6, 0, 0]], dtype=torch.long) output_without_positions = model(input_ids) output_with_positions = model(input_ids, question_positions=question_positions) self.assertTrue((output_without_positions.start_logits == output_with_positions.start_logits).all()) self.assertTrue((output_without_positions.end_logits == output_with_positions.end_logits).all())
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./.git/objects/7f/ad55276d7a1ccfc5858da290033f5b26d5003b
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./examples/pytorch/speech-pretraining/requirements.txt
datasets >= 1.12.0 torch >= 1.5 torchaudio accelerate >= 0.12.0 librosa
datasets >= 1.12.0 torch >= 1.5 torchaudio accelerate >= 0.12.0 librosa
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./examples/research_projects/visual_bert/requirements.txt
appdirs==1.4.3 argon2-cffi==20.1.0 async-generator==1.10 attrs==20.2.0 backcall==0.2.0 CacheControl==0.12.6 certifi==2020.6.20 cffi==1.14.2 chardet==3.0.4 click==7.1.2 colorama==0.4.3 contextlib2==0.6.0 cycler==0.10.0 datasets==1.0.0 decorator==4.4.2 defusedxml==0.6.0 dill==0.3.2 distlib==0.3.0 distro==1.4.0 entrypoints==0.3 filelock==3.0.12 future==0.18.2 html5lib==1.0.1 idna==2.8 ipaddr==2.2.0 ipykernel==5.3.4 ipython ipython-genutils==0.2.0 ipywidgets==7.5.1 jedi==0.17.2 Jinja2>=2.11.3 joblib==1.2.0 jsonschema==3.2.0 jupyter==1.0.0 jupyter-client==6.1.7 jupyter-console==6.2.0 jupyter-core==4.6.3 jupyterlab-pygments==0.1.1 kiwisolver==1.2.0 lockfile==0.12.2 MarkupSafe==1.1.1 matplotlib==3.3.1 mistune==2.0.3 msgpack==0.6.2 nbclient==0.5.0 nbconvert==6.5.1 nbformat==5.0.7 nest-asyncio==1.4.0 notebook==6.4.12 numpy==1.22.0 opencv-python==4.4.0.42 packaging==20.3 pandas==1.1.2 pandocfilters==1.4.2 parso==0.7.1 pep517==0.8.2 pexpect==4.8.0 pickleshare==0.7.5 Pillow>=8.1.1 progress==1.5 prometheus-client==0.8.0 prompt-toolkit==3.0.7 ptyprocess==0.6.0 pyaml==20.4.0 pyarrow==1.0.1 pycparser==2.20 Pygments>=2.7.4 pyparsing==2.4.6 pyrsistent==0.16.0 python-dateutil==2.8.1 pytoml==0.1.21 pytz==2020.1 PyYAML>=5.4 pyzmq==19.0.2 qtconsole==4.7.7 QtPy==1.9.0 regex==2020.7.14 requests==2.22.0 retrying==1.3.3 sacremoses==0.0.43 Send2Trash==1.5.0 sentencepiece==0.1.91 six==1.14.0 terminado==0.8.3 testpath==0.4.4 tokenizers==0.8.1rc2 torch==1.6.0 torchvision==0.7.0 tornado==6.0.4 tqdm==4.48.2 traitlets git+https://github.com/huggingface/transformers.git urllib3==1.26.5 wcwidth==0.2.5 webencodings==0.5.1 wget==3.2 widgetsnbextension==3.5.1 xxhash==2.0.0
appdirs==1.4.3 argon2-cffi==20.1.0 async-generator==1.10 attrs==20.2.0 backcall==0.2.0 CacheControl==0.12.6 certifi==2020.6.20 cffi==1.14.2 chardet==3.0.4 click==7.1.2 colorama==0.4.3 contextlib2==0.6.0 cycler==0.10.0 datasets==1.0.0 decorator==4.4.2 defusedxml==0.6.0 dill==0.3.2 distlib==0.3.0 distro==1.4.0 entrypoints==0.3 filelock==3.0.12 future==0.18.2 html5lib==1.0.1 idna==2.8 ipaddr==2.2.0 ipykernel==5.3.4 ipython ipython-genutils==0.2.0 ipywidgets==7.5.1 jedi==0.17.2 Jinja2>=2.11.3 joblib==1.2.0 jsonschema==3.2.0 jupyter==1.0.0 jupyter-client==6.1.7 jupyter-console==6.2.0 jupyter-core==4.6.3 jupyterlab-pygments==0.1.1 kiwisolver==1.2.0 lockfile==0.12.2 MarkupSafe==1.1.1 matplotlib==3.3.1 mistune==2.0.3 msgpack==0.6.2 nbclient==0.5.0 nbconvert==6.5.1 nbformat==5.0.7 nest-asyncio==1.4.0 notebook==6.4.12 numpy==1.22.0 opencv-python==4.4.0.42 packaging==20.3 pandas==1.1.2 pandocfilters==1.4.2 parso==0.7.1 pep517==0.8.2 pexpect==4.8.0 pickleshare==0.7.5 Pillow>=8.1.1 progress==1.5 prometheus-client==0.8.0 prompt-toolkit==3.0.7 ptyprocess==0.6.0 pyaml==20.4.0 pyarrow==1.0.1 pycparser==2.20 Pygments>=2.7.4 pyparsing==2.4.6 pyrsistent==0.16.0 python-dateutil==2.8.1 pytoml==0.1.21 pytz==2020.1 PyYAML>=5.4 pyzmq==19.0.2 qtconsole==4.7.7 QtPy==1.9.0 regex==2020.7.14 requests==2.22.0 retrying==1.3.3 sacremoses==0.0.43 Send2Trash==1.5.0 sentencepiece==0.1.91 six==1.14.0 terminado==0.8.3 testpath==0.4.4 tokenizers==0.8.1rc2 torch==1.6.0 torchvision==0.7.0 tornado==6.0.4 tqdm==4.48.2 traitlets git+https://github.com/huggingface/transformers.git urllib3==1.26.5 wcwidth==0.2.5 webencodings==0.5.1 wget==3.2 widgetsnbextension==3.5.1 xxhash==2.0.0
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./examples/research_projects/deebert/src/modeling_highway_bert.py
import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def entropy(x): """Calculate entropy of a pre-softmax logit Tensor""" exp_x = torch.exp(x) A = torch.sum(exp_x, dim=1) # sum of exp(x_i) B = torch.sum(x * exp_x, dim=1) # sum of x_i * exp(x_i) return torch.log(A) - B / A class DeeBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)]) self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)] def set_early_exit_entropy(self, x): if (type(x) is float) or (type(x) is int): for i in range(len(self.early_exit_entropy)): self.early_exit_entropy[i] = x else: self.early_exit_entropy = x def init_highway_pooler(self, pooler): loaded_model = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name]) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): all_hidden_states = () all_attentions = () all_highway_exits = () for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask ) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) current_outputs = (hidden_states,) if self.output_hidden_states: current_outputs = current_outputs + (all_hidden_states,) if self.output_attentions: current_outputs = current_outputs + (all_attentions,) highway_exit = self.highway[i](current_outputs) # logits, pooled_output if not self.training: highway_logits = highway_exit[0] highway_entropy = entropy(highway_logits) highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy all_highway_exits = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i + 1) else: all_highway_exits = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) outputs = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( "The Bert Model transformer with early exiting (DeeBERT). 
", BERT_START_DOCSTRING, ) class DeeBertModel(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = DeeBertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def init_highway_pooler(self): self.encoder.init_highway_pooler(self.pooler) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, ): r""" Return: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pre-training. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. highway_exits (:obj:`tuple(tuple(torch.Tensor))`: Tuple of each early exit's results (total length: number of layers) Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states. 
""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = encoder_extended_attention_mask.to( dtype=next(self.parameters()).dtype ) # fp16 compatibility encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) outputs = (sequence_output, pooled_output,) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class HighwayException(Exception): def __init__(self, message, exit_layer): self.message = message self.exit_layer = exit_layer # start from 1! 
class BertHighway(nn.Module): """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification) """ def __init__(self, config): super().__init__() self.pooler = BertPooler(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_outputs): # Pooler pooler_input = encoder_outputs[0] pooler_output = self.pooler(pooler_input) # "return" pooler_output # BertModel bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification pooled_output = bmodel_output[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. """, BERT_START_DOCSTRING, ) class DeeBertForSequenceClassification(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.num_layers = config.num_hidden_layers self.bert = DeeBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. highway_exits (:obj:`tuple(tuple(torch.Tensor))`: Tuple of each early exit's results (total length: number of layers) Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states. 
""" exit_layer = self.num_layers try: outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: outputs = e.message exit_layer = e.exit_layer logits = outputs[0] if not self.training: original_entropy = entropy(logits) highway_entropy = [] highway_logits_all = [] if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) # work with highway exits highway_losses = [] for highway_exit in outputs[-1]: highway_logits = highway_exit[0] if not self.training: highway_logits_all.append(highway_logits) highway_entropy.append(highway_exit[2]) if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) highway_losses.append(highway_loss) if train_highway: outputs = (sum(highway_losses[:-1]),) + outputs # exclude the final highway, of course else: outputs = (loss,) + outputs if not self.training: outputs = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: outputs = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def entropy(x): """Calculate entropy of a pre-softmax logit Tensor""" exp_x = torch.exp(x) A = torch.sum(exp_x, dim=1) # sum of exp(x_i) B = torch.sum(x * exp_x, dim=1) # sum of x_i * exp(x_i) return torch.log(A) - B / A class DeeBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)]) self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)] def set_early_exit_entropy(self, x): if (type(x) is float) or (type(x) is int): for i in range(len(self.early_exit_entropy)): self.early_exit_entropy[i] = x else: self.early_exit_entropy = x def init_highway_pooler(self, pooler): loaded_model = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name]) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): all_hidden_states = () all_attentions = () all_highway_exits = () for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask ) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) current_outputs = (hidden_states,) if self.output_hidden_states: current_outputs = current_outputs + (all_hidden_states,) if self.output_attentions: current_outputs = current_outputs + (all_attentions,) highway_exit = self.highway[i](current_outputs) # logits, pooled_output if not self.training: highway_logits = highway_exit[0] highway_entropy = entropy(highway_logits) highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy all_highway_exits = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i + 1) else: all_highway_exits = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) outputs = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( "The Bert Model transformer with early exiting (DeeBERT). 
", BERT_START_DOCSTRING, ) class DeeBertModel(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = DeeBertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def init_highway_pooler(self): self.encoder.init_highway_pooler(self.pooler) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, ): r""" Return: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pre-training. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. highway_exits (:obj:`tuple(tuple(torch.Tensor))`: Tuple of each early exit's results (total length: number of layers) Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states. 
""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = encoder_extended_attention_mask.to( dtype=next(self.parameters()).dtype ) # fp16 compatibility encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) outputs = (sequence_output, pooled_output,) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class HighwayException(Exception): def __init__(self, message, exit_layer): self.message = message self.exit_layer = exit_layer # start from 1! 
class BertHighway(nn.Module): """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification) """ def __init__(self, config): super().__init__() self.pooler = BertPooler(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_outputs): # Pooler pooler_input = encoder_outputs[0] pooler_output = self.pooler(pooler_input) # "return" pooler_output # BertModel bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification pooled_output = bmodel_output[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. """, BERT_START_DOCSTRING, ) class DeeBertForSequenceClassification(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.num_layers = config.num_hidden_layers self.bert = DeeBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. highway_exits (:obj:`tuple(tuple(torch.Tensor))`: Tuple of each early exit's results (total length: number of layers) Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states. 
""" exit_layer = self.num_layers try: outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: outputs = e.message exit_layer = e.exit_layer logits = outputs[0] if not self.training: original_entropy = entropy(logits) highway_entropy = [] highway_logits_all = [] if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) # work with highway exits highway_losses = [] for highway_exit in outputs[-1]: highway_logits = highway_exit[0] if not self.training: highway_logits_all.append(highway_logits) highway_entropy.append(highway_exit[2]) if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) highway_losses.append(highway_loss) if train_highway: outputs = (sum(highway_losses[:-1]),) + outputs # exclude the final highway, of course else: outputs = (loss,) + outputs if not self.training: outputs = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: outputs = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
-1
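A minimal, self-contained sketch (an editorial illustration, not part of the record above) of the entropy-based early-exit rule used by the DeeBERT highway classifiers: the entropy helper mirrors the function in the code above, while the threshold and example logits are invented for illustration.

import torch


def entropy(x):
    # Shannon entropy of softmax(x), computed directly from pre-softmax logits:
    # H = logsumexp(x) - sum_i softmax(x)_i * x_i  (same formula as in the model code above)
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


def should_exit(highway_logits, threshold):
    # Mirrors the check `highway_entropy < self.early_exit_entropy[i]` in DeeBertEncoder:
    # exit early only when every example in the batch is confident enough.
    return bool((entropy(highway_logits) < threshold).all())


confident_logits = torch.tensor([[8.0, -4.0]])  # peaked prediction, entropy close to 0
uncertain_logits = torch.tensor([[0.1, 0.0]])   # near-uniform prediction, entropy close to ln(2)
print(should_exit(confident_logits, threshold=0.3))  # True
print(should_exit(uncertain_logits, threshold=0.3))  # False

In the model above, meeting this condition raises HighwayException with the highway logits, which carries the result out of the encoder loop so that the remaining layers are skipped entirely.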
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./examples/flax/language-modeling/t5_tokenizer_model.py
#!/usr/bin/env python3 import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class SentencePieceUnigramTokenizer(BaseTokenizer): """ This class is a copy of `DeDLOC's tokenizer implementation <https://github.com/yandex-research/DeDLOC/blob/main/sahajbert/tokenizer/tokenizer_model.py>`__ . Custom SentencePiece Unigram Tokenizer with NMT, NKFC, spaces and lower-casing characters normalization Represents the Unigram algorithm, with the pretokenization used by SentencePiece """ def __init__( self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>", ): self.special_tokens = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } self.special_tokens_list = [None] * len(self.special_tokens) for token_dict in self.special_tokens.values(): self.special_tokens_list[token_dict["id"]] = token_dict["token"] tokenizer = Tokenizer(Unigram()) tokenizer.normalizer = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " "), normalizers.Lowercase(), ] ) tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space), pre_tokenizers.Digits(individual_digits=True), pre_tokenizers.Punctuation(), ] ) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) tokenizer.post_processor = TemplateProcessing( single=f"$A {self.special_tokens['eos']['token']}", special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], ) parameters = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(tokenizer, parameters) def train( self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, ): """Train the model using the given files""" trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) self.add_unk_id() def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, ): """Train the model using the given iterator""" trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, ) self._tokenizer.train_from_iterator(iterator, trainer=trainer) self.add_unk_id() def add_unk_id(self): tokenizer_json = json.loads(self._tokenizer.to_str()) tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"] self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
#!/usr/bin/env python3 import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class SentencePieceUnigramTokenizer(BaseTokenizer): """ This class is a copy of `DeDLOC's tokenizer implementation <https://github.com/yandex-research/DeDLOC/blob/main/sahajbert/tokenizer/tokenizer_model.py>`__ . Custom SentencePiece Unigram Tokenizer with NMT, NKFC, spaces and lower-casing characters normalization Represents the Unigram algorithm, with the pretokenization used by SentencePiece """ def __init__( self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>", ): self.special_tokens = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } self.special_tokens_list = [None] * len(self.special_tokens) for token_dict in self.special_tokens.values(): self.special_tokens_list[token_dict["id"]] = token_dict["token"] tokenizer = Tokenizer(Unigram()) tokenizer.normalizer = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " "), normalizers.Lowercase(), ] ) tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space), pre_tokenizers.Digits(individual_digits=True), pre_tokenizers.Punctuation(), ] ) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) tokenizer.post_processor = TemplateProcessing( single=f"$A {self.special_tokens['eos']['token']}", special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], ) parameters = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(tokenizer, parameters) def train( self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, ): """Train the model using the given files""" trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) self.add_unk_id() def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, ): """Train the model using the given iterator""" trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, ) self._tokenizer.train_from_iterator(iterator, trainer=trainer) self.add_unk_id() def add_unk_id(self): tokenizer_json = json.loads(self._tokenizer.to_str()) tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"] self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
-1
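A hedged usage sketch for the SentencePieceUnigramTokenizer defined above; it assumes t5_tokenizer_model.py is importable from the working directory, and the tiny corpus, vocabulary size, and printed output are invented purely for illustration rather than taken from the example scripts.

from t5_tokenizer_model import SentencePieceUnigramTokenizer  # assumes the file above is on PYTHONPATH

corpus = [
    "the quick brown fox jumps over the lazy dog",
    "sentencepiece unigram tokenizers are trained directly on raw text",
    "the lazy dog sleeps while the quick brown fox runs",
]

tokenizer = SentencePieceUnigramTokenizer()
# A small vocabulary keeps this toy run fast; the class default above is 8000.
tokenizer.train_from_iterator(iter(corpus), vocab_size=100, show_progress=False)

encoding = tokenizer.encode("the quick brown fox")
print(encoding.tokens)          # lower-cased sub-word pieces, ending with the </s> token added by the post-processor
print(tokenizer.get_vocab_size())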
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert LUKE checkpoint.""" import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size): # Load configuration defined in the metadata file with open(metadata_path) as metadata_file: metadata = json.load(metadata_file) config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"]) # Load in the weights from the checkpoint_path state_dict = torch.load(checkpoint_path, map_location="cpu") # Load the entity vocab file entity_vocab = load_entity_vocab(entity_vocab_path) tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"]) # Add special tokens to the token vocabulary for downstream tasks entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False) entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False) tokenizer.add_special_tokens(dict(additional_special_tokens=[entity_token_1, entity_token_2])) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}") tokenizer.save_pretrained(pytorch_dump_folder_path) with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f: json.dump(entity_vocab, f) tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path) # Initialize the embeddings of the special tokens word_emb = state_dict["embeddings.word_embeddings.weight"] ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0) ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0) state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb]) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers): for matrix_name in ["query.weight", "query.bias"]: prefix = f"encoder.layer.{layer_index}.attention.self." state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name] state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name] state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"] entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]] model = LukeModel(config=config).eval() missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f"Missing keys {', '.join(missing_keys)}. 
Expected only missing embeddings.position_ids") if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)): raise ValueError( "Unexpected keys" f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}" ) # Check outputs tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification") text = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." ) span = (39, 42) encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt") outputs = model(**encoding) # Verify word hidden states if model_size == "large": expected_shape = torch.Size((1, 42, 1024)) expected_slice = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base expected_shape = torch.Size((1, 42, 768)) expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]]) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4): raise ValueError # Verify entity hidden states if model_size == "large": expected_shape = torch.Size((1, 1, 1024)) expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]]) else: # base expected_shape = torch.Size((1, 1, 768)) expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]]) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" {expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(pytorch_dump_folder_path)) model.save_pretrained(pytorch_dump_folder_path) def load_entity_vocab(entity_vocab_path): entity_vocab = {} with open(entity_vocab_path, "r", encoding="utf-8") as f: for index, line in enumerate(f): title, _ = line.rstrip().split("\t") entity_vocab[title] = index return entity_vocab if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) args = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert LUKE checkpoint.""" import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size): # Load configuration defined in the metadata file with open(metadata_path) as metadata_file: metadata = json.load(metadata_file) config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"]) # Load in the weights from the checkpoint_path state_dict = torch.load(checkpoint_path, map_location="cpu") # Load the entity vocab file entity_vocab = load_entity_vocab(entity_vocab_path) tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"]) # Add special tokens to the token vocabulary for downstream tasks entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False) entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False) tokenizer.add_special_tokens(dict(additional_special_tokens=[entity_token_1, entity_token_2])) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}") tokenizer.save_pretrained(pytorch_dump_folder_path) with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f: json.dump(entity_vocab, f) tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path) # Initialize the embeddings of the special tokens word_emb = state_dict["embeddings.word_embeddings.weight"] ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0) ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0) state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb]) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers): for matrix_name in ["query.weight", "query.bias"]: prefix = f"encoder.layer.{layer_index}.attention.self." state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name] state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name] state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"] entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]] model = LukeModel(config=config).eval() missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f"Missing keys {', '.join(missing_keys)}. 
Expected only missing embeddings.position_ids") if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)): raise ValueError( "Unexpected keys" f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}" ) # Check outputs tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification") text = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." ) span = (39, 42) encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt") outputs = model(**encoding) # Verify word hidden states if model_size == "large": expected_shape = torch.Size((1, 42, 1024)) expected_slice = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base expected_shape = torch.Size((1, 42, 768)) expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]]) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4): raise ValueError # Verify entity hidden states if model_size == "large": expected_shape = torch.Size((1, 1, 1024)) expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]]) else: # base expected_shape = torch.Size((1, 1, 768)) expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]]) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" {expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(pytorch_dump_folder_path)) model.save_pretrained(pytorch_dump_folder_path) def load_entity_vocab(entity_vocab_path): entity_vocab = {} with open(entity_vocab_path, "r", encoding="utf-8") as f: for index, line in enumerate(f): title, _ = line.rstrip().split("\t") entity_vocab[title] = index return entity_vocab if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) args = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
-1
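A small illustrative sketch of the entity_vocab.tsv format that load_entity_vocab above expects: one tab-separated title per line (the second column is read but discarded), mapped to sequential ids. The helper name, the in-memory file, and the sample entries are invented for this example.

import io


def load_entity_vocab_from_lines(lines):
    # Same parsing logic as load_entity_vocab above, but taking any iterable of lines
    # so it can be demonstrated without a file on disk.
    entity_vocab = {}
    for index, line in enumerate(lines):
        title, _ = line.rstrip().split("\t")
        entity_vocab[title] = index
    return entity_vocab


sample_tsv = io.StringIO("[PAD]\t0\n[UNK]\t0\n[MASK]\t0\n[MASK2]\t0\nAna Ivanovic\t17\n")
print(load_entity_vocab_from_lines(sample_tsv))
# {'[PAD]': 0, '[UNK]': 1, '[MASK]': 2, '[MASK2]': 3, 'Ana Ivanovic': 4}

The conversion script above relies on "[MASK]" and "[MASK2]" being present in this vocabulary, since it copies the [MASK] entity embedding into the [MASK2] slot before saving the model.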
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/trajectory_transformer/modeling_trajectory_transformer.py
# coding=utf-8 # Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch TrajectoryTransformer model.""" import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import functional as F from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_trajectory_transformer import TrajectoryTransformerConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "CarlCochet/trajectory-transformer-halfcheetah-medium-v2" _CONFIG_FOR_DOC = "TrajectoryTransformerConfig" TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "CarlCochet/trajectory-transformer-halfcheetah-medium-v2", # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer ] def load_tf_weights_in_trajectory_transformer(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model @dataclass class TrajectoryTransformerOutput(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
GPT2Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class TrajectoryTransformerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = TrajectoryTransformerConfig load_tf_weights = load_tf_weights_in_trajectory_transformer base_model_prefix = "trajectory_transformer" main_input_name = "trajectories" supports_gradient_checkpointing = True def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, TrajectoryTransformerModel): module.gradient_checkpointing = value def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, EinLinear): for i in range(module.n_models): nn.init.kaiming_uniform_(module.weight[i], a=math.sqrt(5) / self.config.kaiming_initializer_range) if module.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight[i]) bound = (1 / math.sqrt(fan_in)) * self.config.initializer_range nn.init.uniform_(module.bias[i], -bound, bound) TRAJECTORY_TRANSFORMER_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`TrajectoryTransformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING = r""" Args: trajectories (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Batch of trajectories, where a trajectory is a sequence of states, actions and rewards. past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`, *optional*): Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have their past given to this model should not be passed as `input_ids` as they have already been computed. targets (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Desired targets used to compute the loss. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class EinLinear(nn.Module): def __init__(self, n_models, in_features, out_features, bias): super().__init__() self.n_models = n_models self.out_features = out_features self.in_features = in_features self.weight = nn.Parameter(torch.Tensor(n_models, out_features, in_features)) if bias: self.bias = nn.Parameter(torch.Tensor(n_models, out_features)) else: self.register_parameter("bias", None) def reset_parameters(self): for i in range(self.n_models): nn.init.kaiming_uniform_(self.weight[i], a=math.sqrt(5)) if self.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[i]) bound = 1 / math.sqrt(fan_in) nn.init.uniform_(self.bias[i], -bound, bound) def forward(self, input): """ Args: input (`torch.FloatTensor` of shape `(B, n_models, input_dim)`): The input to the layer. """ # [ batch_size x n_models x output_dim ] output = torch.einsum("eoi,bei->beo", self.weight, input) if self.bias is not None: raise RuntimeError() return output class CausalSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.n_embd % config.n_head != 0: raise ValueError(f"n_head ({config.n_head}) should be a divisor of n_embd ({config.n_embd})") # key, query, value projections for all heads self.key = nn.Linear(config.n_embd, config.n_embd) self.query = nn.Linear(config.n_embd, config.n_embd) self.value = nn.Linear(config.n_embd, config.n_embd) # regularization self.attn_drop = nn.Dropout(config.attn_pdrop) self.resid_drop = nn.Dropout(config.resid_pdrop) # output projection self.proj = nn.Linear(config.n_embd, config.n_embd) # causal mask to ensure that attention is only applied to the left in the input sequence self.register_buffer( "mask", torch.tril(torch.ones(config.block_size, config.block_size)).view( 1, 1, config.block_size, config.block_size ), ) # mask previous value estimates joined_dim = config.observation_dim + config.action_dim + 2 self.mask.squeeze()[:, joined_dim - 1 :: joined_dim] = 0 self.n_head = config.n_head def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ): batch_size, sequence_length, embedding_dim = hidden_states.size() # calculate query, key, values for all heads in batch and move head forward to be the batch dim # [ batch_size x n_heads x sequence_length x head_dim ] key = ( self.key(hidden_states) .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head) .transpose(1, 2) ) query = ( self.query(hidden_states) .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head) .transpose(1, 2) ) value = ( self.value(hidden_states) .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head) .transpose(1, 2) ) if layer_past is not None: past_key, past_value = layer_past key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: present = (key, value) else: present = None # causal self-attention # [ batch_size x n_heads x sequence_length x sequence_length ] 
attn_weights = (torch.matmul(query, key.transpose(-2, -1))) * (1.0 / math.sqrt(key.size(-1))) attn_weights = attn_weights.masked_fill( self.mask[:, :, :sequence_length, :sequence_length] == 0, torch.finfo(attn_weights.dtype).min ) attn_weights = F.softmax(attn_weights, dim=-1) self._attn_map = attn_weights.clone() attn_weights = self.attn_drop(attn_weights) output = torch.matmul(attn_weights, value) # [ batch_size x sequence_length x embedding_dim ] # re-assemble all head outputs side by side output = output.transpose(1, 2).contiguous().view(batch_size, sequence_length, embedding_dim) # output projection output = self.resid_drop(self.proj(output)) outputs = (output, present) if output_attentions: outputs += (attn_weights,) return outputs class Block(nn.Module): def __init__(self, config): super().__init__() self.ln1 = nn.LayerNorm(config.n_embd) self.ln2 = nn.LayerNorm(config.n_embd) self.attn = CausalSelfAttention(config) # MLP self.l1 = nn.Linear(config.n_embd, 4 * config.n_embd) self.act = nn.GELU() self.l2 = nn.Linear(4 * config.n_embd, config.n_embd) self.drop = nn.Dropout(config.resid_pdrop) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ): residual = hidden_states hidden_states = self.ln1(hidden_states) attn_outputs = self.attn( hidden_states, layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions ) attn_output = attn_outputs[0] outputs = attn_outputs[1:] hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln2(hidden_states) hidden_states = self.l1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.l2(hidden_states) hidden_states = residual + self.drop(hidden_states) if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs @add_start_docstrings( "The bare TrajectoryTransformer Model transformer outputting raw hidden-states without any specific head on top.", TRAJECTORY_TRANSFORMER_START_DOCSTRING, ) class TrajectoryTransformerModel(TrajectoryTransformerPreTrainedModel): """the full GPT language model, with a context size of block_size""" def __init__(self, config): super().__init__(config) # input embedding stem (+1 for stop token) self.tok_emb = nn.Embedding(config.vocab_size * config.transition_dim + 1, config.n_embd) self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd)) self.drop = nn.Dropout(config.embd_pdrop) # transformer self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)]) # decoder head self.ln_f = nn.LayerNorm(config.n_embd) self.head = EinLinear(config.transition_dim, config.n_embd, config.vocab_size + 1, bias=False) self.vocab_size = config.vocab_size self.stop_token = config.vocab_size * config.transition_dim self.block_size = config.block_size self.observation_dim = config.observation_dim self.action_dim = config.action_dim self.transition_dim = config.transition_dim self.embedding_dim = config.n_embd self.action_weight = config.action_weight self.reward_weight = config.reward_weight self.value_weight = config.value_weight self.gradient_checkpointing = False self.post_init() def get_block_size(self): return self.block_size def offset_tokens(self, trajectories): _, sequence_length = trajectories.shape n_states = int(np.ceil(sequence_length / self.transition_dim)) offsets = torch.arange(self.transition_dim) * self.vocab_size offsets = 
offsets.repeat(n_states).to(trajectories.device) offset_trajectories = trajectories + offsets[:sequence_length] offset_trajectories[trajectories == self.vocab_size] = self.stop_token return offset_trajectories def pad_to_full_observation(self, hidden_states): batch_size, sequence_length, _ = hidden_states.shape n_pad = (self.transition_dim - sequence_length % self.transition_dim) % self.transition_dim padding = torch.zeros(batch_size, n_pad, self.embedding_dim, device=hidden_states.device) # [ batch_size x padded_sequence_length' x embedding_dim ] hidden_states_pad = torch.cat([hidden_states, padding], dim=1) hidden_states_pad = hidden_states_pad.view(-1, self.transition_dim, self.embedding_dim) return hidden_states_pad, n_pad @add_start_docstrings_to_model_forward( TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length") ) @replace_return_docstrings(output_type=TrajectoryTransformerOutput, config_class=_CONFIG_FOR_DOC) def forward( self, trajectories: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, targets: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TrajectoryTransformerOutput]: r""" Returns: Examples: ```python >>> from transformers import TrajectoryTransformerModel >>> import torch >>> model = TrajectoryTransformerModel.from_pretrained( ... "CarlCochet/trajectory-transformer-halfcheetah-medium-v2" ... ) >>> model.to(device) >>> model.eval() >>> observations_dim, action_dim, batch_size = 17, 6, 256 >>> seq_length = observations_dim + action_dim + 1 >>> trajectories = torch.LongTensor([np.random.permutation(self.seq_length) for _ in range(batch_size)]).to( ... device ... ) >>> targets = torch.LongTensor([np.random.permutation(self.seq_length) for _ in range(batch_size)]).to(device) >>> outputs = model( ... trajectories, ... targets=targets, ... use_cache=True, ... output_attentions=True, ... output_hidden_states=True, ... return_dict=True, ... ) ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if past_key_values is None: past_key_values = tuple([None] * len(self.blocks)) batch_size, sequence_length = trajectories.size() if sequence_length > self.block_size: raise ValueError("Cannot forward, model block size is exhausted.") offset_trajectories = self.offset_tokens(trajectories) # [ batch_size x sequence_length x embedding_dim ] # forward the GPT model token_embeddings = self.tok_emb(offset_trajectories) # each index maps to a (learnable) vector position_embeddings = self.pos_emb[:, :sequence_length, :] # each position maps to a (learnable) vector hidden_states = self.drop(token_embeddings + position_embeddings) presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.blocks, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, layer_past, use_cache, output_attentions, ) else: outputs = block(hidden_states, layer_past, use_cache, output_attentions) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) # [ batch_size x sequence_length x embedding_dim ] hidden_state = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states_pad, n_pad = self.pad_to_full_observation(hidden_state) logits = self.head(hidden_states_pad) logits = logits.reshape(batch_size, sequence_length + n_pad, self.vocab_size + 1) logits = logits[:, :sequence_length] # if we are given some desired targets also calculate the loss if targets is not None: loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.view(-1), reduction="none") if self.action_weight != 1 or self.reward_weight != 1 or self.value_weight != 1: # make weights n_states = int(np.ceil(sequence_length / self.transition_dim)) weights = torch.cat( [ torch.ones(self.observation_dim, device=trajectories.device), torch.ones(self.action_dim, device=trajectories.device) * self.action_weight, torch.ones(1, device=trajectories.device) * self.reward_weight, torch.ones(1, device=trajectories.device) * self.value_weight, ] ) weights = weights.repeat(n_states) weights = weights[1:].repeat(batch_size, 1) loss = loss * weights.view(-1) loss = (loss * attention_mask.view(-1)).mean() else: loss = None if not return_dict: return tuple(v for v in [loss, logits, presents, all_hidden_states, all_self_attentions] if v is not None) return TrajectoryTransformerOutput( loss=loss, logits=logits, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, )
# coding=utf-8 # Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch TrajectoryTransformer model.""" import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import functional as F from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_trajectory_transformer import TrajectoryTransformerConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "CarlCochet/trajectory-transformer-halfcheetah-medium-v2" _CONFIG_FOR_DOC = "TrajectoryTransformerConfig" TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "CarlCochet/trajectory-transformer-halfcheetah-medium-v2", # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer ] def load_tf_weights_in_trajectory_transformer(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model @dataclass class TrajectoryTransformerOutput(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
GPT2Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class TrajectoryTransformerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = TrajectoryTransformerConfig load_tf_weights = load_tf_weights_in_trajectory_transformer base_model_prefix = "trajectory_transformer" main_input_name = "trajectories" supports_gradient_checkpointing = True def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, TrajectoryTransformerModel): module.gradient_checkpointing = value def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, EinLinear): for i in range(module.n_models): nn.init.kaiming_uniform_(module.weight[i], a=math.sqrt(5) / self.config.kaiming_initializer_range) if module.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight[i]) bound = (1 / math.sqrt(fan_in)) * self.config.initializer_range nn.init.uniform_(module.bias[i], -bound, bound) TRAJECTORY_TRANSFORMER_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`TrajectoryTransformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING = r""" Args: trajectories (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Batch of trajectories, where a trajectory is a sequence of states, actions and rewards. past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`, *optional*): Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have their past given to this model should not be passed as `input_ids` as they have already been computed. targets (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Desired targets used to compute the loss. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class EinLinear(nn.Module): def __init__(self, n_models, in_features, out_features, bias): super().__init__() self.n_models = n_models self.out_features = out_features self.in_features = in_features self.weight = nn.Parameter(torch.Tensor(n_models, out_features, in_features)) if bias: self.bias = nn.Parameter(torch.Tensor(n_models, out_features)) else: self.register_parameter("bias", None) def reset_parameters(self): for i in range(self.n_models): nn.init.kaiming_uniform_(self.weight[i], a=math.sqrt(5)) if self.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[i]) bound = 1 / math.sqrt(fan_in) nn.init.uniform_(self.bias[i], -bound, bound) def forward(self, input): """ Args: input (`torch.FloatTensor` of shape `(B, n_models, input_dim)`): The input to the layer. """ # [ batch_size x n_models x output_dim ] output = torch.einsum("eoi,bei->beo", self.weight, input) if self.bias is not None: raise RuntimeError() return output class CausalSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.n_embd % config.n_head != 0: raise ValueError(f"n_head ({config.n_head}) should be a divisor of n_embd ({config.n_embd})") # key, query, value projections for all heads self.key = nn.Linear(config.n_embd, config.n_embd) self.query = nn.Linear(config.n_embd, config.n_embd) self.value = nn.Linear(config.n_embd, config.n_embd) # regularization self.attn_drop = nn.Dropout(config.attn_pdrop) self.resid_drop = nn.Dropout(config.resid_pdrop) # output projection self.proj = nn.Linear(config.n_embd, config.n_embd) # causal mask to ensure that attention is only applied to the left in the input sequence self.register_buffer( "mask", torch.tril(torch.ones(config.block_size, config.block_size)).view( 1, 1, config.block_size, config.block_size ), ) # mask previous value estimates joined_dim = config.observation_dim + config.action_dim + 2 self.mask.squeeze()[:, joined_dim - 1 :: joined_dim] = 0 self.n_head = config.n_head def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ): batch_size, sequence_length, embedding_dim = hidden_states.size() # calculate query, key, values for all heads in batch and move head forward to be the batch dim # [ batch_size x n_heads x sequence_length x head_dim ] key = ( self.key(hidden_states) .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head) .transpose(1, 2) ) query = ( self.query(hidden_states) .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head) .transpose(1, 2) ) value = ( self.value(hidden_states) .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head) .transpose(1, 2) ) if layer_past is not None: past_key, past_value = layer_past key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: present = (key, value) else: present = None # causal self-attention # [ batch_size x n_heads x sequence_length x sequence_length ] 
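        # scaled dot-product attention: QK^T / sqrt(head_dim); the causal mask built in
        # __init__ (which also hides previous value-estimate tokens) is applied by filling
        # masked positions with the dtype minimum before the softmax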
attn_weights = (torch.matmul(query, key.transpose(-2, -1))) * (1.0 / math.sqrt(key.size(-1))) attn_weights = attn_weights.masked_fill( self.mask[:, :, :sequence_length, :sequence_length] == 0, torch.finfo(attn_weights.dtype).min ) attn_weights = F.softmax(attn_weights, dim=-1) self._attn_map = attn_weights.clone() attn_weights = self.attn_drop(attn_weights) output = torch.matmul(attn_weights, value) # [ batch_size x sequence_length x embedding_dim ] # re-assemble all head outputs side by side output = output.transpose(1, 2).contiguous().view(batch_size, sequence_length, embedding_dim) # output projection output = self.resid_drop(self.proj(output)) outputs = (output, present) if output_attentions: outputs += (attn_weights,) return outputs class Block(nn.Module): def __init__(self, config): super().__init__() self.ln1 = nn.LayerNorm(config.n_embd) self.ln2 = nn.LayerNorm(config.n_embd) self.attn = CausalSelfAttention(config) # MLP self.l1 = nn.Linear(config.n_embd, 4 * config.n_embd) self.act = nn.GELU() self.l2 = nn.Linear(4 * config.n_embd, config.n_embd) self.drop = nn.Dropout(config.resid_pdrop) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ): residual = hidden_states hidden_states = self.ln1(hidden_states) attn_outputs = self.attn( hidden_states, layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions ) attn_output = attn_outputs[0] outputs = attn_outputs[1:] hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln2(hidden_states) hidden_states = self.l1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.l2(hidden_states) hidden_states = residual + self.drop(hidden_states) if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs @add_start_docstrings( "The bare TrajectoryTransformer Model transformer outputting raw hidden-states without any specific head on top.", TRAJECTORY_TRANSFORMER_START_DOCSTRING, ) class TrajectoryTransformerModel(TrajectoryTransformerPreTrainedModel): """the full GPT language model, with a context size of block_size""" def __init__(self, config): super().__init__(config) # input embedding stem (+1 for stop token) self.tok_emb = nn.Embedding(config.vocab_size * config.transition_dim + 1, config.n_embd) self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd)) self.drop = nn.Dropout(config.embd_pdrop) # transformer self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)]) # decoder head self.ln_f = nn.LayerNorm(config.n_embd) self.head = EinLinear(config.transition_dim, config.n_embd, config.vocab_size + 1, bias=False) self.vocab_size = config.vocab_size self.stop_token = config.vocab_size * config.transition_dim self.block_size = config.block_size self.observation_dim = config.observation_dim self.action_dim = config.action_dim self.transition_dim = config.transition_dim self.embedding_dim = config.n_embd self.action_weight = config.action_weight self.reward_weight = config.reward_weight self.value_weight = config.value_weight self.gradient_checkpointing = False self.post_init() def get_block_size(self): return self.block_size def offset_tokens(self, trajectories): _, sequence_length = trajectories.shape n_states = int(np.ceil(sequence_length / self.transition_dim)) offsets = torch.arange(self.transition_dim) * self.vocab_size offsets = 
offsets.repeat(n_states).to(trajectories.device) offset_trajectories = trajectories + offsets[:sequence_length] offset_trajectories[trajectories == self.vocab_size] = self.stop_token return offset_trajectories def pad_to_full_observation(self, hidden_states): batch_size, sequence_length, _ = hidden_states.shape n_pad = (self.transition_dim - sequence_length % self.transition_dim) % self.transition_dim padding = torch.zeros(batch_size, n_pad, self.embedding_dim, device=hidden_states.device) # [ batch_size x padded_sequence_length' x embedding_dim ] hidden_states_pad = torch.cat([hidden_states, padding], dim=1) hidden_states_pad = hidden_states_pad.view(-1, self.transition_dim, self.embedding_dim) return hidden_states_pad, n_pad @add_start_docstrings_to_model_forward( TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length") ) @replace_return_docstrings(output_type=TrajectoryTransformerOutput, config_class=_CONFIG_FOR_DOC) def forward( self, trajectories: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, targets: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TrajectoryTransformerOutput]: r""" Returns: Examples: ```python >>> from transformers import TrajectoryTransformerModel >>> import torch >>> model = TrajectoryTransformerModel.from_pretrained( ... "CarlCochet/trajectory-transformer-halfcheetah-medium-v2" ... ) >>> model.to(device) >>> model.eval() >>> observations_dim, action_dim, batch_size = 17, 6, 256 >>> seq_length = observations_dim + action_dim + 1 >>> trajectories = torch.LongTensor([np.random.permutation(self.seq_length) for _ in range(batch_size)]).to( ... device ... ) >>> targets = torch.LongTensor([np.random.permutation(self.seq_length) for _ in range(batch_size)]).to(device) >>> outputs = model( ... trajectories, ... targets=targets, ... use_cache=True, ... output_attentions=True, ... output_hidden_states=True, ... return_dict=True, ... ) ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if past_key_values is None: past_key_values = tuple([None] * len(self.blocks)) batch_size, sequence_length = trajectories.size() if sequence_length > self.block_size: raise ValueError("Cannot forward, model block size is exhausted.") offset_trajectories = self.offset_tokens(trajectories) # [ batch_size x sequence_length x embedding_dim ] # forward the GPT model token_embeddings = self.tok_emb(offset_trajectories) # each index maps to a (learnable) vector position_embeddings = self.pos_emb[:, :sequence_length, :] # each position maps to a (learnable) vector hidden_states = self.drop(token_embeddings + position_embeddings) presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.blocks, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, layer_past, use_cache, output_attentions, ) else: outputs = block(hidden_states, layer_past, use_cache, output_attentions) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) # [ batch_size x sequence_length x embedding_dim ] hidden_state = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states_pad, n_pad = self.pad_to_full_observation(hidden_state) logits = self.head(hidden_states_pad) logits = logits.reshape(batch_size, sequence_length + n_pad, self.vocab_size + 1) logits = logits[:, :sequence_length] # if we are given some desired targets also calculate the loss if targets is not None: loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.view(-1), reduction="none") if self.action_weight != 1 or self.reward_weight != 1 or self.value_weight != 1: # make weights n_states = int(np.ceil(sequence_length / self.transition_dim)) weights = torch.cat( [ torch.ones(self.observation_dim, device=trajectories.device), torch.ones(self.action_dim, device=trajectories.device) * self.action_weight, torch.ones(1, device=trajectories.device) * self.reward_weight, torch.ones(1, device=trajectories.device) * self.value_weight, ] ) weights = weights.repeat(n_states) weights = weights[1:].repeat(batch_size, 1) loss = loss * weights.view(-1) loss = (loss * attention_mask.view(-1)).mean() else: loss = None if not return_dict: return tuple(v for v in [loss, logits, presents, all_hidden_states, all_self_attentions] if v is not None) return TrajectoryTransformerOutput( loss=loss, logits=logits, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, )
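# --- Usage sketch (illustrative, not part of the original file) ---
# A minimal end-to-end run, assuming the public
# "CarlCochet/trajectory-transformer-halfcheetah-medium-v2" checkpoint referenced in the
# docstring above; the numpy import, `device` and the `attention_mask` are filled in here
# because the docstring example leaves them implicit.
if __name__ == "__main__":
    import numpy as np
    import torch

    from transformers import TrajectoryTransformerModel

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = TrajectoryTransformerModel.from_pretrained(
        "CarlCochet/trajectory-transformer-halfcheetah-medium-v2"
    ).to(device)
    model.eval()

    observations_dim, action_dim, batch_size = 17, 6, 256
    seq_length = observations_dim + action_dim + 1

    # random token trajectories, only meant to exercise the forward pass
    trajectories = torch.LongTensor(
        [np.random.permutation(seq_length) for _ in range(batch_size)]
    ).to(device)
    targets = trajectories.clone()
    # the loss is masked with `attention_mask`, so it must accompany `targets`
    attention_mask = torch.ones(batch_size, seq_length, dtype=torch.long, device=device)

    with torch.no_grad():
        outputs = model(
            trajectories,
            targets=targets,
            attention_mask=attention_mask,
            use_cache=True,
            output_attentions=True,
            output_hidden_states=True,
            return_dict=True,
        )
    print(outputs.loss, outputs.logits.shape)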
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax Wav2Vec2 model.""" from functools import partial from typing import Optional, Tuple, Union import numpy as np import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_wav2vec2 import Wav2Vec2Config logger = logging.get_logger(__name__) @flax.struct.dataclass class FlaxWav2Vec2BaseModelOutput(ModelOutput): """ Output type of [`FlaxWav2Vec2BaseModelOutput`], with potential hidden states and attentions. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. extract_features (`jnp.ndarray` of shape `(batch_size, sequence_length, last_conv_dim)`): Sequence of extracted feature vectors of the last convolutional layer of the model with `last_conv_dim` being the dimension of the last convolutional layer. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: jnp.ndarray = None extract_features: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxWav2Vec2ForPreTrainingOutput(ModelOutput): """ Output type of [`FlaxWav2Vec2ForPreTrainingOutput`], with potential hidden states and attentions. Args: loss (*optional*, returned when model is in train mode, `jnp.ndarray` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) . (classification) loss. 
projected_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ projected_states: jnp.ndarray = None projected_quantized_states: jnp.ndarray = None codevector_perplexity: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[np.ndarray] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. 
however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_length: size of the mask min_masks: minimum number of masked spans """ batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and" f" `sequence_length`: {sequence_length}`" ) # compute number of masked spans in batch num_masked_spans = int(mask_prob * sequence_length / mask_length + np.random.rand(1).item()) num_masked_spans = max(num_masked_spans, min_masks) # make sure num masked indices <= sequence_length if num_masked_spans * mask_length > sequence_length: num_masked_spans = sequence_length // mask_length # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) # get random indices to mask spec_aug_mask_idxs = np.array( [ np.random.choice(np.arange(sequence_length - (mask_length - 1)), num_masked_spans, replace=False) for _ in range(batch_size) ] ) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to(spec_aug_mask_idxs[:, :, None], (batch_size, num_masked_spans, mask_length)) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, num_masked_spans * mask_length) offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, num_masked_spans, mask_length)).reshape( batch_size, num_masked_spans * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) if attention_mask is not None: # make sure padded input ids cannot be masked spec_aug_mask = np.where(attention_mask, spec_aug_mask, False) return spec_aug_mask def _sample_negative_indices(features_shape: Tuple, num_negatives: int, attention_mask: Optional[np.ndarray] = None): """ Sample `num_negatives` vectors from feature vectors. """ batch_size, sequence_length, hidden_size = features_shape if sequence_length <= 1: raise ValueError( "`features should have `sequence_length` > 1, but are of shape " f"(batch_size, sequence_length, hidden_size) = ({batch_size, sequence_length, hidden_size})." ) # get `num_negatives` random vector indices from the same utterance sampled_negative_indices = [] for batch_idx in range(batch_size): high = attention_mask[batch_idx].sum() - 1 if attention_mask is not None else sequence_length - 1 sampled_indices_slice = np.random.randint(0, high, size=(num_negatives * sequence_length,)) sampled_negative_indices.append(sampled_indices_slice) sampled_negative_indices = np.asarray(sampled_negative_indices, dtype=np.int32) # generate indices of the positive vectors themselves, repeat them `num_negatives` times feature_indices = np.broadcast_to(np.arange(sequence_length)[:, None], (sequence_length, num_negatives)).flatten() # avoid sampling the same positive vector, but keep the distribution uniform sampled_negative_indices[sampled_negative_indices >= feature_indices] += 1 # correct for batch size for batch_idx in range(1, batch_size): sampled_negative_indices[batch_idx] += batch_idx * sequence_length return sampled_negative_indices WAV_2_VEC_2_START_DOCSTRING = r""" Wav2Vec2 was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. This model inherits from [`FlaxPreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`Wav2Vec2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ WAV_2_VEC_2_INPUTS_DOCSTRING = r""" Args: input_values (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install soundfile*). To prepare the array into *input_values*, the [`Wav2Vec2Processor`] should be used for padding and conversion into a tensor of type *jnp.ndarray*. See [`Wav2Vec2Processor.__call__`] for details. attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) .. warning:: `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor has `config.return_attention_mask == False`, such as [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different results depending on whether `input_values` is padded or not. mask_time_indices (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class FlaxWav2Vec2LayerNormConvLayer(nn.Module): config: Wav2Vec2Config layer_id: int = 0 dtype: jnp.dtype = jnp.float32 def setup(self): self.in_conv_dim = self.config.conv_dim[self.layer_id] if self.layer_id > 0 else 1 self.out_conv_dim = self.config.conv_dim[self.layer_id] self.conv = nn.Conv( features=self.config.conv_dim[self.layer_id], kernel_size=(self.config.conv_kernel[self.layer_id],), strides=(self.config.conv_stride[self.layer_id],), use_bias=self.config.conv_bias, kernel_init=jax.nn.initializers.he_normal(), padding="VALID", dtype=self.dtype, ) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.activation = ACT2FN[self.config.feat_extract_activation] def __call__(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxConvWithWeightNorm(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( features=self.config.hidden_size, kernel_size=(self.config.num_conv_pos_embeddings,), kernel_init=jax.nn.initializers.he_normal(), padding="VALID", feature_group_count=self.config.num_conv_pos_embedding_groups, dtype=self.dtype, ) weight_shape = ( self.conv.features, self.conv.features // self.conv.feature_group_count, self.conv.kernel_size[0], ) self.weight_v = self.param("weight_v", jax.nn.initializers.he_normal(), weight_shape) self.weight_g = self.param("weight_g", lambda _: jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :]) self.bias = self.param("bias", jax.nn.initializers.zeros, (self.conv.features,)) self.prev_padding = self.conv.kernel_size[0] // 2 def _get_normed_weights(self): weight_v_norm = jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :] normed_weight_v = jnp.divide(self.weight_v, weight_v_norm) normed_kernel = jnp.multiply(normed_weight_v, self.weight_g) return normed_kernel def __call__(self, hidden_states): kernel = self._get_normed_weights() hidden_states = jnp.pad(hidden_states, ((0, 0), (self.prev_padding, self.prev_padding), (0, 0))) hidden_states = self.conv.apply({"params": {"kernel": kernel.T, "bias": self.bias}}, hidden_states) return hidden_states class FlaxWav2Vec2PositionalConvEmbedding(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = FlaxConvWithWeightNorm(self.config, dtype=self.dtype) self.activation = ACT2FN[self.config.feat_extract_activation] self.num_pad_remove = 1 if self.config.num_conv_pos_embeddings % 2 == 0 else 0 def __call__(self, hidden_states): hidden_states = hidden_states.transpose((0, 1, 2)) hidden_states = self.conv(hidden_states) if self.num_pad_remove > 0: hidden_states = hidden_states[:, : -self.num_pad_remove, :] hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose((0, 1, 2)) return hidden_states class FlaxConvLayersCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): if self.config.feat_extract_norm == "layer": self.layers = [ 
FlaxWav2Vec2LayerNormConvLayer(self.config, layer_id=i, name=str(i), dtype=self.dtype) for i in range(self.config.num_feat_extract_layers) ] elif self.config.feat_extract_norm == "group": raise NotImplementedError("At the moment only ``config.feat_extact_norm == 'layer'`` is supported") else: raise ValueError( f"`config.feat_extract_norm` is {self.config.feat_extract_norm}, but has to be one of ['group'," " 'layer']" ) def __call__(self, hidden_states): for i, conv_layer in enumerate(self.layers): hidden_states = conv_layer(hidden_states) return hidden_states class FlaxWav2Vec2FeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv_layers = FlaxConvLayersCollection(self.config, dtype=self.dtype) def __call__(self, input_values, freeze_feature_encoder=False): hidden_states = input_values[:, :, None] hidden_states = self.conv_layers(hidden_states) if freeze_feature_encoder: hidden_states = jax.lax.stop_gradient(hidden_states) return hidden_states class FlaxWav2Vec2FeatureProjection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.projection = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.feat_proj_dropout) def __call__(self, hidden_states, deterministic=True): norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states, norm_hidden_states class FlaxWav2Vec2Attention(nn.Module): config: Wav2Vec2Config embed_dim: int num_heads: int dropout: float = 0.0 bias: bool = True dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) dense = partial( nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense() self.out_proj = dense() self.dropout_layer = nn.Dropout(rate=self.dropout) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) def __call__( self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray] = None, attention_mask: Optional[jnp.ndarray] = None, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: """Input shape: Batch x Time x Channel""" # get query proj query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) if attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # Convert the boolean attention mask to an attention bias. 
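        # positions that may be attended to (mask > 0) receive a bias of 0.0 and masked
        # positions receive -inf, so they get (near-)zero weight after the softmax inside
        # `dot_product_attention_weights`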
if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, float("-inf")).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class FlaxWav2Vec2FeedForward(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.intermediate_dropout = nn.Dropout(rate=self.config.activation_dropout) self.intermediate_dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) if isinstance(self.config.hidden_act, str): self.intermediate_act_fn = ACT2FN[self.config.hidden_act] else: self.intermediate_act_fn = self.config.hidden_act self.output_dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.output_dropout = nn.Dropout(rate=self.config.hidden_dropout) def __call__(self, hidden_states, deterministic=True): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states, deterministic=deterministic) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxWav2Vec2EncoderLayerStableLayerNorm(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxWav2Vec2Attention( config=self.config, embed_dim=self.config.hidden_size, num_heads=self.config.num_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.feed_forward = FlaxWav2Vec2FeedForward(self.config, dtype=self.dtype) self.final_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_mask=None, deterministic=True, output_attentions=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights = self.attention( hidden_states, attention_mask=attention_mask, deterministic=deterministic ) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward( self.final_layer_norm(hidden_states), deterministic=deterministic ) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class FlaxWav2Vec2EncoderLayerStableLayerNormCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [ FlaxWav2Vec2EncoderLayerStableLayerNorm(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask=None, deterministic: 
bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class FlaxWav2Vec2StableLayerNormEncoder(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.pos_conv_embed = FlaxWav2Vec2PositionalConvEmbedding(self.config, dtype=self.dtype) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout) self.layers = FlaxWav2Vec2EncoderLayerStableLayerNormCollection(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask=None, deterministic=True, output_attentions=False, output_hidden_states=False, return_dict=True, ): if attention_mask is not None: # make sure padded tokens are not attended to hidden_states = jnp.where( jnp.broadcast_to(attention_mask[:, :, None], hidden_states.shape), hidden_states, 0 ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = self.layer_norm(outputs[0]) # update the last element in `hidden_states` after applying `layernorm` above hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_state,) if not return_dict: outputs = (last_hidden_state, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=outputs.attentions ) class FlaxWav2Vec2GumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information. 
""" config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.num_groups = self.config.num_codevector_groups self.num_vars = self.config.num_codevectors_per_group if self.config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {self.config.codevector_dim} must be divisible by" f" `config.num_codevector_groups` {self.num_groups} for concatenation" ) # storage for codebook variables (codewords) self.codevectors = self.param( "codevectors", jax.nn.initializers.uniform(), (1, self.num_groups * self.num_vars, self.config.codevector_dim // self.num_groups), ) self.weight_proj = nn.Dense( self.num_groups * self.num_vars, kernel_init=jax.nn.initializers.normal(1.0), dtype=self.dtype, ) @staticmethod def _compute_perplexity(probs, mask=None): if mask is not None: mask_extended = jnp.broadcast_to(mask.flatten()[:, None, None], probs.shape) probs = jnp.where(mask_extended, probs, jnp.zeros_like(probs)) marginal_probs = probs.sum(axis=0) / mask.sum() else: marginal_probs = probs.mean(axis=0) perplexity = jnp.exp(-jnp.sum(marginal_probs * jnp.log(marginal_probs + 1e-7), axis=-1)).sum() return perplexity def __call__(self, hidden_states, mask_time_indices=None, deterministic=True, temperature=1): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.reshape(batch_size * sequence_length * self.num_groups, -1) if not deterministic: # sample code vector probs via gumbel in differentiateable way gumbel_rng = self.make_rng("gumbel") gumbels = jax.random.gumbel(gumbel_rng, hidden_states.shape) codevector_probs = nn.softmax((hidden_states + gumbels) / temperature) # compute perplexity codevector_soft_dist = nn.softmax( hidden_states.reshape(batch_size * sequence_length, self.num_groups, -1), axis=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(axis=-1) codevector_probs = jax.nn.one_hot(codevector_idx, hidden_states.shape[-1]) * 1.0 codevector_probs = codevector_probs.reshape(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs, mask_time_indices) codevector_probs = codevector_probs.reshape(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = jnp.expand_dims(codevector_probs, axis=-1) * self.codevectors codevectors = codevectors_per_group.reshape(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).reshape(batch_size, sequence_length, -1) return codevectors, perplexity class FlaxWav2Vec2Adapter(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): # hidden_states require down-projection if feature dims don't match if self.config.output_hidden_size != self.config.hidden_size: self.proj = nn.Dense( self.config.output_hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.proj_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) else: self.proj = self.proj_layer_norm = None self.layers = FlaxWav2Vec2AdapterLayersCollection(self.config, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): # down-project hidden_states if required if self.proj is not None and self.proj_layer_norm is not None: hidden_states = 
self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) hidden_states = self.layers(hidden_states) return hidden_states class FlaxWav2Vec2AdapterLayer(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( features=2 * self.config.output_hidden_size, kernel_size=(self.config.adapter_kernel_size,), strides=(self.config.adapter_stride,), padding=((1, 1),), kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = nn.glu(hidden_states, axis=2) return hidden_states class FlaxWav2Vec2AdapterLayersCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [ FlaxWav2Vec2AdapterLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_adapter_layers) ] def __call__(self, hidden_states): for conv_layer in self.layers: hidden_states = conv_layer(hidden_states) return hidden_states class FlaxWav2Vec2PreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Wav2Vec2Config base_model_prefix: str = "wav2vec2" main_input_name = "input_values" module_class: nn.Module = None def __init__( self, config: Wav2Vec2Config, input_shape: Tuple = (1, 1024), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_values = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_values) params_rng, dropout_rng = jax.random.split(rng, 2) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, input_values, attention_mask, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, freeze_feature_encoder: bool = False, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_values.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} return self.module.apply( inputs, jnp.array(input_values, dtype="f4"), jnp.array(attention_mask, dtype="i4"), mask_time_indices, not train, output_attentions, 
output_hidden_states, freeze_feature_encoder, return_dict, rngs=rngs, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): return self.module._get_feat_extract_output_lengths(input_lengths, add_adapter=add_adapter) class FlaxWav2Vec2Module(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.feature_extractor = FlaxWav2Vec2FeatureEncoder(self.config, dtype=self.dtype) self.feature_projection = FlaxWav2Vec2FeatureProjection(self.config, dtype=self.dtype) self.masked_spec_embed = self.param( "masked_spec_embed", jax.nn.initializers.uniform(), (self.config.hidden_size,) ) if self.config.do_stable_layer_norm: self.encoder = FlaxWav2Vec2StableLayerNormEncoder(self.config, dtype=self.dtype) else: raise NotImplementedError("``config.do_stable_layer_norm is False`` is currently not supported.") self.adapter = FlaxWav2Vec2Adapter(self.config, dtype=self.dtype) if self.config.add_adapter else None def __call__( self, input_values, attention_mask=None, mask_time_indices=None, deterministic=True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): extract_features = self.feature_extractor(input_values, freeze_feature_encoder=freeze_feature_encoder) # make sure that no loss is computed on padded inputs if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask( extract_features.shape[1], attention_mask, add_adapter=False ) hidden_states, extract_features = self.feature_projection(extract_features, deterministic=deterministic) if mask_time_indices is not None: # apply SpecAugment along time axis with given indices hidden_states = jnp.where( jnp.broadcast_to(mask_time_indices[:, :, None], hidden_states.shape), jnp.broadcast_to(self.masked_spec_embed[None, None, :], hidden_states.shape), hidden_states, ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.adapter is not None: hidden_states = self.adapter(hidden_states) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return FlaxWav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: jnp.ndarray, add_adapter=None ): # Effectively attention_mask.sum(-1), but not inplace to be 
able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(axis=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) batch_size = attention_mask.shape[0] attention_mask = jnp.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype) # these two operations makes sure that all values # before the output lengths indices are attended to attention_mask = attention_mask.at[jnp.arange(attention_mask.shape[0]), output_lengths - 1].set(1) attention_mask = jnp.flip(jnp.flip(attention_mask, -1).cumsum(-1), -1).astype("bool") return attention_mask @add_start_docstrings( "The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.", WAV_2_VEC_2_START_DOCSTRING, ) class FlaxWav2Vec2Model(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2Module FLAX_WAV2VEC2_MODEL_DOCSTRING = """ Returns: Example: ```python >>> from transformers import Wav2Vec2Processor, FlaxWav2Vec2Model >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-lv60") >>> model = FlaxWav2Vec2Model.from_pretrained("facebook/wav2vec2-large-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor( ... ds["speech"][0], sampling_rate=16_000, return_tensors="np" ... ).input_values # Batch size 1 >>> hidden_states = model(input_values).last_hidden_state ``` """ overwrite_call_docstring( FlaxWav2Vec2Model, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_MODEL_DOCSTRING, ) append_replace_return_docstrings( FlaxWav2Vec2Model, output_type=FlaxWav2Vec2BaseModelOutput, config_class=Wav2Vec2Config ) class FlaxWav2Vec2ForCTCModule(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.final_dropout) self.lm_head = nn.Dense( self.config.vocab_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, deterministic=True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): outputs = self.wav2vec2( input_values, attention_mask=attention_mask, mask_time_indices=mask_time_indices, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, freeze_feature_encoder=freeze_feature_encoder, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.lm_head(hidden_states) if not return_dict: return (logits,) + outputs[2:] return FlaxCausalLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None, ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths @add_start_docstrings( "Wav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).", WAV_2_VEC_2_START_DOCSTRING, ) class FlaxWav2Vec2ForCTC(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2ForCTCModule FLAX_WAV2VEC2_FOR_CTC_DOCSTRING = """ Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import Wav2Vec2Processor, FlaxWav2Vec2ForCTC >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60") >>> model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor( ... ds["speech"][0], sampling_rate=16_000, return_tensors="np" ... ).input_values # Batch size 1 >>> logits = model(input_values).logits >>> predicted_ids = jnp.argmax(logits, axis=-1) >>> transcription = processor.decode(predicted_ids[0]) >>> # should give: "A MAN SAID TO THE UNIVERSE SIR I EXIST" ``` """ overwrite_call_docstring( FlaxWav2Vec2ForCTC, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_CTC_DOCSTRING, ) append_replace_return_docstrings(FlaxWav2Vec2ForCTC, output_type=FlaxCausalLMOutput, config_class=Wav2Vec2Config) class FlaxWav2Vec2ForPreTrainingModule(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype) self.dropout_features = nn.Dropout(self.config.feat_quantizer_dropout) self.quantizer = FlaxWav2Vec2GumbelVectorQuantizer(self.config, dtype=self.dtype) self.project_q = nn.Dense( self.config.proj_codevector_dim, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.project_hid = nn.Dense( self.config.proj_codevector_dim, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, gumbel_temperature: int = 1, deterministic: bool = True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): r""" Returns: Example: ```python ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wav2vec2( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, mask_time_indices=mask_time_indices, deterministic=deterministic, freeze_feature_encoder=freeze_feature_encoder, return_dict=return_dict, ) # project all transformed features (including masked) to final vq dim transformer_features = self.project_hid(outputs[0]) # quantize all (unmasked) extracted features and project to final vq dim extract_features = self.dropout_features(outputs[1], deterministic=deterministic) quantized_features, codevector_perplexity = self.quantizer( extract_features, mask_time_indices, deterministic=deterministic, temperature=gumbel_temperature ) quantized_features = 
self.project_q(quantized_features) if not return_dict: return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return FlaxWav2Vec2ForPreTrainingOutput( projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths @add_start_docstrings("""Wav2Vec2 Model with a quantizer and `VQ` head on top.""", WAV_2_VEC_2_START_DOCSTRING) class FlaxWav2Vec2ForPreTraining(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2ForPreTrainingModule @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) # overwrite since has `gumbel_temperature` input def __call__( self, input_values, attention_mask=None, mask_time_indices=None, gumbel_temperature: int = 1, params: dict = None, dropout_rng: jax.random.PRNGKey = None, gumbel_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, freeze_feature_encoder: bool = False, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_values.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng if gumbel_rng is not None: rngs["gumbel"] = gumbel_rng inputs = {"params": params or self.params} return self.module.apply( inputs, jnp.array(input_values, dtype="f4"), jnp.array(attention_mask, dtype="i4"), mask_time_indices, gumbel_temperature, not train, output_attentions, output_hidden_states, freeze_feature_encoder, return_dict, rngs=rngs, ) FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING = """ Returns: Example: ```python >>> import optax >>> import numpy as np >>> import jax.numpy as jnp >>> from transformers import Wav2Vec2FeatureExtractor, FlaxWav2Vec2ForPreTraining >>> from transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices >>> from datasets import load_dataset >>> import soundfile as sf >>> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-lv60") >>> model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... 
return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = feature_extractor(ds["speech"][0], return_tensors="np").input_values # Batch size 1 >>> # compute masked indices >>> batch_size, raw_sequence_length = input_values.shape >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) >>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) >>> outputs = model(input_values, mask_time_indices=mask_time_indices) >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) >>> cosine_sim = optax.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states) >>> # show that cosine similarity is much higher than random >>> assert np.asarray(cosine_sim)[mask_time_indices].mean() > 0.5 ``` """ overwrite_call_docstring( FlaxWav2Vec2ForPreTraining, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING, ) append_replace_return_docstrings( FlaxWav2Vec2ForPreTraining, output_type=FlaxWav2Vec2ForPreTrainingOutput, config_class=Wav2Vec2Config )
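The `_get_feat_extract_output_lengths` helpers in the file above all apply the standard 1D-convolution output-length formula `(input_length - kernel_size) // stride + 1` over `config.conv_kernel` and `config.conv_stride`. A minimal, self-contained sketch of that computation follows; the kernel/stride tuples are assumed illustration defaults, not values read from this row.

```python
# Sketch of the convolutional output-length computation used by
# `_get_feat_extract_output_lengths` above. The kernel/stride tuples below
# are assumed defaults for illustration only.
def conv_out_length(input_length: int, kernel_size: int, stride: int) -> int:
    # 1D convolution output length (no padding, dilation = 1)
    return (input_length - kernel_size) // stride + 1


def feat_extract_output_length(
    num_samples: int,
    conv_kernel=(10, 3, 3, 3, 3, 2, 2),
    conv_stride=(5, 2, 2, 2, 2, 2, 2),
) -> int:
    length = num_samples
    for kernel_size, stride in zip(conv_kernel, conv_stride):
        length = conv_out_length(length, kernel_size, stride)
    return length


if __name__ == "__main__":
    # one second of 16 kHz audio -> 49 feature frames with these settings
    print(feat_extract_output_length(16_000))
```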
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax Wav2Vec2 model.""" from functools import partial from typing import Optional, Tuple, Union import numpy as np import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_wav2vec2 import Wav2Vec2Config logger = logging.get_logger(__name__) @flax.struct.dataclass class FlaxWav2Vec2BaseModelOutput(ModelOutput): """ Output type of [`FlaxWav2Vec2BaseModelOutput`], with potential hidden states and attentions. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. extract_features (`jnp.ndarray` of shape `(batch_size, sequence_length, last_conv_dim)`): Sequence of extracted feature vectors of the last convolutional layer of the model with `last_conv_dim` being the dimension of the last convolutional layer. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: jnp.ndarray = None extract_features: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxWav2Vec2ForPreTrainingOutput(ModelOutput): """ Output type of [`FlaxWav2Vec2ForPreTrainingOutput`], with potential hidden states and attentions. Args: loss (*optional*, returned when model is in train mode, `jnp.ndarray` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) . (classification) loss. 
projected_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ projected_states: jnp.ndarray = None projected_quantized_states: jnp.ndarray = None codevector_perplexity: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[np.ndarray] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. 
however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_length: size of the mask min_masks: minimum number of masked spans """ batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and" f" `sequence_length`: {sequence_length}`" ) # compute number of masked spans in batch num_masked_spans = int(mask_prob * sequence_length / mask_length + np.random.rand(1).item()) num_masked_spans = max(num_masked_spans, min_masks) # make sure num masked indices <= sequence_length if num_masked_spans * mask_length > sequence_length: num_masked_spans = sequence_length // mask_length # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) # get random indices to mask spec_aug_mask_idxs = np.array( [ np.random.choice(np.arange(sequence_length - (mask_length - 1)), num_masked_spans, replace=False) for _ in range(batch_size) ] ) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to(spec_aug_mask_idxs[:, :, None], (batch_size, num_masked_spans, mask_length)) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, num_masked_spans * mask_length) offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, num_masked_spans, mask_length)).reshape( batch_size, num_masked_spans * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) if attention_mask is not None: # make sure padded input ids cannot be masked spec_aug_mask = np.where(attention_mask, spec_aug_mask, False) return spec_aug_mask def _sample_negative_indices(features_shape: Tuple, num_negatives: int, attention_mask: Optional[np.ndarray] = None): """ Sample `num_negatives` vectors from feature vectors. """ batch_size, sequence_length, hidden_size = features_shape if sequence_length <= 1: raise ValueError( "`features should have `sequence_length` > 1, but are of shape " f"(batch_size, sequence_length, hidden_size) = ({batch_size, sequence_length, hidden_size})." ) # get `num_negatives` random vector indices from the same utterance sampled_negative_indices = [] for batch_idx in range(batch_size): high = attention_mask[batch_idx].sum() - 1 if attention_mask is not None else sequence_length - 1 sampled_indices_slice = np.random.randint(0, high, size=(num_negatives * sequence_length,)) sampled_negative_indices.append(sampled_indices_slice) sampled_negative_indices = np.asarray(sampled_negative_indices, dtype=np.int32) # generate indices of the positive vectors themselves, repeat them `num_negatives` times feature_indices = np.broadcast_to(np.arange(sequence_length)[:, None], (sequence_length, num_negatives)).flatten() # avoid sampling the same positive vector, but keep the distribution uniform sampled_negative_indices[sampled_negative_indices >= feature_indices] += 1 # correct for batch size for batch_idx in range(1, batch_size): sampled_negative_indices[batch_idx] += batch_idx * sequence_length return sampled_negative_indices WAV_2_VEC_2_START_DOCSTRING = r""" Wav2Vec2 was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. This model inherits from [`FlaxPreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`Wav2Vec2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ WAV_2_VEC_2_INPUTS_DOCSTRING = r""" Args: input_values (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install soundfile*). To prepare the array into *input_values*, the [`Wav2Vec2Processor`] should be used for padding and conversion into a tensor of type *jnp.ndarray*. See [`Wav2Vec2Processor.__call__`] for details. attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) .. warning:: `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor has `config.return_attention_mask == False`, such as [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different results depending on whether `input_values` is padded or not. mask_time_indices (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class FlaxWav2Vec2LayerNormConvLayer(nn.Module): config: Wav2Vec2Config layer_id: int = 0 dtype: jnp.dtype = jnp.float32 def setup(self): self.in_conv_dim = self.config.conv_dim[self.layer_id] if self.layer_id > 0 else 1 self.out_conv_dim = self.config.conv_dim[self.layer_id] self.conv = nn.Conv( features=self.config.conv_dim[self.layer_id], kernel_size=(self.config.conv_kernel[self.layer_id],), strides=(self.config.conv_stride[self.layer_id],), use_bias=self.config.conv_bias, kernel_init=jax.nn.initializers.he_normal(), padding="VALID", dtype=self.dtype, ) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.activation = ACT2FN[self.config.feat_extract_activation] def __call__(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxConvWithWeightNorm(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( features=self.config.hidden_size, kernel_size=(self.config.num_conv_pos_embeddings,), kernel_init=jax.nn.initializers.he_normal(), padding="VALID", feature_group_count=self.config.num_conv_pos_embedding_groups, dtype=self.dtype, ) weight_shape = ( self.conv.features, self.conv.features // self.conv.feature_group_count, self.conv.kernel_size[0], ) self.weight_v = self.param("weight_v", jax.nn.initializers.he_normal(), weight_shape) self.weight_g = self.param("weight_g", lambda _: jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :]) self.bias = self.param("bias", jax.nn.initializers.zeros, (self.conv.features,)) self.prev_padding = self.conv.kernel_size[0] // 2 def _get_normed_weights(self): weight_v_norm = jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :] normed_weight_v = jnp.divide(self.weight_v, weight_v_norm) normed_kernel = jnp.multiply(normed_weight_v, self.weight_g) return normed_kernel def __call__(self, hidden_states): kernel = self._get_normed_weights() hidden_states = jnp.pad(hidden_states, ((0, 0), (self.prev_padding, self.prev_padding), (0, 0))) hidden_states = self.conv.apply({"params": {"kernel": kernel.T, "bias": self.bias}}, hidden_states) return hidden_states class FlaxWav2Vec2PositionalConvEmbedding(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = FlaxConvWithWeightNorm(self.config, dtype=self.dtype) self.activation = ACT2FN[self.config.feat_extract_activation] self.num_pad_remove = 1 if self.config.num_conv_pos_embeddings % 2 == 0 else 0 def __call__(self, hidden_states): hidden_states = hidden_states.transpose((0, 1, 2)) hidden_states = self.conv(hidden_states) if self.num_pad_remove > 0: hidden_states = hidden_states[:, : -self.num_pad_remove, :] hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose((0, 1, 2)) return hidden_states class FlaxConvLayersCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): if self.config.feat_extract_norm == "layer": self.layers = [ 
FlaxWav2Vec2LayerNormConvLayer(self.config, layer_id=i, name=str(i), dtype=self.dtype) for i in range(self.config.num_feat_extract_layers) ] elif self.config.feat_extract_norm == "group": raise NotImplementedError("At the moment only ``config.feat_extact_norm == 'layer'`` is supported") else: raise ValueError( f"`config.feat_extract_norm` is {self.config.feat_extract_norm}, but has to be one of ['group'," " 'layer']" ) def __call__(self, hidden_states): for i, conv_layer in enumerate(self.layers): hidden_states = conv_layer(hidden_states) return hidden_states class FlaxWav2Vec2FeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv_layers = FlaxConvLayersCollection(self.config, dtype=self.dtype) def __call__(self, input_values, freeze_feature_encoder=False): hidden_states = input_values[:, :, None] hidden_states = self.conv_layers(hidden_states) if freeze_feature_encoder: hidden_states = jax.lax.stop_gradient(hidden_states) return hidden_states class FlaxWav2Vec2FeatureProjection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.projection = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.feat_proj_dropout) def __call__(self, hidden_states, deterministic=True): norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states, norm_hidden_states class FlaxWav2Vec2Attention(nn.Module): config: Wav2Vec2Config embed_dim: int num_heads: int dropout: float = 0.0 bias: bool = True dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) dense = partial( nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense() self.out_proj = dense() self.dropout_layer = nn.Dropout(rate=self.dropout) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) def __call__( self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray] = None, attention_mask: Optional[jnp.ndarray] = None, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: """Input shape: Batch x Time x Channel""" # get query proj query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) if attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # Convert the boolean attention mask to an attention bias. 
if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, float("-inf")).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class FlaxWav2Vec2FeedForward(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.intermediate_dropout = nn.Dropout(rate=self.config.activation_dropout) self.intermediate_dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) if isinstance(self.config.hidden_act, str): self.intermediate_act_fn = ACT2FN[self.config.hidden_act] else: self.intermediate_act_fn = self.config.hidden_act self.output_dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.output_dropout = nn.Dropout(rate=self.config.hidden_dropout) def __call__(self, hidden_states, deterministic=True): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states, deterministic=deterministic) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxWav2Vec2EncoderLayerStableLayerNorm(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxWav2Vec2Attention( config=self.config, embed_dim=self.config.hidden_size, num_heads=self.config.num_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.feed_forward = FlaxWav2Vec2FeedForward(self.config, dtype=self.dtype) self.final_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_mask=None, deterministic=True, output_attentions=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights = self.attention( hidden_states, attention_mask=attention_mask, deterministic=deterministic ) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward( self.final_layer_norm(hidden_states), deterministic=deterministic ) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class FlaxWav2Vec2EncoderLayerStableLayerNormCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [ FlaxWav2Vec2EncoderLayerStableLayerNorm(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask=None, deterministic: 
bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class FlaxWav2Vec2StableLayerNormEncoder(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.pos_conv_embed = FlaxWav2Vec2PositionalConvEmbedding(self.config, dtype=self.dtype) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout) self.layers = FlaxWav2Vec2EncoderLayerStableLayerNormCollection(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask=None, deterministic=True, output_attentions=False, output_hidden_states=False, return_dict=True, ): if attention_mask is not None: # make sure padded tokens are not attended to hidden_states = jnp.where( jnp.broadcast_to(attention_mask[:, :, None], hidden_states.shape), hidden_states, 0 ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = self.layer_norm(outputs[0]) # update the last element in `hidden_states` after applying `layernorm` above hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_state,) if not return_dict: outputs = (last_hidden_state, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=outputs.attentions ) class FlaxWav2Vec2GumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information. 
""" config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.num_groups = self.config.num_codevector_groups self.num_vars = self.config.num_codevectors_per_group if self.config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {self.config.codevector_dim} must be divisible by" f" `config.num_codevector_groups` {self.num_groups} for concatenation" ) # storage for codebook variables (codewords) self.codevectors = self.param( "codevectors", jax.nn.initializers.uniform(), (1, self.num_groups * self.num_vars, self.config.codevector_dim // self.num_groups), ) self.weight_proj = nn.Dense( self.num_groups * self.num_vars, kernel_init=jax.nn.initializers.normal(1.0), dtype=self.dtype, ) @staticmethod def _compute_perplexity(probs, mask=None): if mask is not None: mask_extended = jnp.broadcast_to(mask.flatten()[:, None, None], probs.shape) probs = jnp.where(mask_extended, probs, jnp.zeros_like(probs)) marginal_probs = probs.sum(axis=0) / mask.sum() else: marginal_probs = probs.mean(axis=0) perplexity = jnp.exp(-jnp.sum(marginal_probs * jnp.log(marginal_probs + 1e-7), axis=-1)).sum() return perplexity def __call__(self, hidden_states, mask_time_indices=None, deterministic=True, temperature=1): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.reshape(batch_size * sequence_length * self.num_groups, -1) if not deterministic: # sample code vector probs via gumbel in differentiateable way gumbel_rng = self.make_rng("gumbel") gumbels = jax.random.gumbel(gumbel_rng, hidden_states.shape) codevector_probs = nn.softmax((hidden_states + gumbels) / temperature) # compute perplexity codevector_soft_dist = nn.softmax( hidden_states.reshape(batch_size * sequence_length, self.num_groups, -1), axis=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(axis=-1) codevector_probs = jax.nn.one_hot(codevector_idx, hidden_states.shape[-1]) * 1.0 codevector_probs = codevector_probs.reshape(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs, mask_time_indices) codevector_probs = codevector_probs.reshape(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = jnp.expand_dims(codevector_probs, axis=-1) * self.codevectors codevectors = codevectors_per_group.reshape(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).reshape(batch_size, sequence_length, -1) return codevectors, perplexity class FlaxWav2Vec2Adapter(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): # hidden_states require down-projection if feature dims don't match if self.config.output_hidden_size != self.config.hidden_size: self.proj = nn.Dense( self.config.output_hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.proj_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) else: self.proj = self.proj_layer_norm = None self.layers = FlaxWav2Vec2AdapterLayersCollection(self.config, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): # down-project hidden_states if required if self.proj is not None and self.proj_layer_norm is not None: hidden_states = 
self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) hidden_states = self.layers(hidden_states) return hidden_states class FlaxWav2Vec2AdapterLayer(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( features=2 * self.config.output_hidden_size, kernel_size=(self.config.adapter_kernel_size,), strides=(self.config.adapter_stride,), padding=((1, 1),), kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = nn.glu(hidden_states, axis=2) return hidden_states class FlaxWav2Vec2AdapterLayersCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [ FlaxWav2Vec2AdapterLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_adapter_layers) ] def __call__(self, hidden_states): for conv_layer in self.layers: hidden_states = conv_layer(hidden_states) return hidden_states class FlaxWav2Vec2PreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Wav2Vec2Config base_model_prefix: str = "wav2vec2" main_input_name = "input_values" module_class: nn.Module = None def __init__( self, config: Wav2Vec2Config, input_shape: Tuple = (1, 1024), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_values = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_values) params_rng, dropout_rng = jax.random.split(rng, 2) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, input_values, attention_mask, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, freeze_feature_encoder: bool = False, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_values.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} return self.module.apply( inputs, jnp.array(input_values, dtype="f4"), jnp.array(attention_mask, dtype="i4"), mask_time_indices, not train, output_attentions, 
output_hidden_states, freeze_feature_encoder, return_dict, rngs=rngs, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): return self.module._get_feat_extract_output_lengths(input_lengths, add_adapter=add_adapter) class FlaxWav2Vec2Module(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.feature_extractor = FlaxWav2Vec2FeatureEncoder(self.config, dtype=self.dtype) self.feature_projection = FlaxWav2Vec2FeatureProjection(self.config, dtype=self.dtype) self.masked_spec_embed = self.param( "masked_spec_embed", jax.nn.initializers.uniform(), (self.config.hidden_size,) ) if self.config.do_stable_layer_norm: self.encoder = FlaxWav2Vec2StableLayerNormEncoder(self.config, dtype=self.dtype) else: raise NotImplementedError("``config.do_stable_layer_norm is False`` is currently not supported.") self.adapter = FlaxWav2Vec2Adapter(self.config, dtype=self.dtype) if self.config.add_adapter else None def __call__( self, input_values, attention_mask=None, mask_time_indices=None, deterministic=True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): extract_features = self.feature_extractor(input_values, freeze_feature_encoder=freeze_feature_encoder) # make sure that no loss is computed on padded inputs if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask( extract_features.shape[1], attention_mask, add_adapter=False ) hidden_states, extract_features = self.feature_projection(extract_features, deterministic=deterministic) if mask_time_indices is not None: # apply SpecAugment along time axis with given indices hidden_states = jnp.where( jnp.broadcast_to(mask_time_indices[:, :, None], hidden_states.shape), jnp.broadcast_to(self.masked_spec_embed[None, None, :], hidden_states.shape), hidden_states, ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.adapter is not None: hidden_states = self.adapter(hidden_states) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return FlaxWav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: jnp.ndarray, add_adapter=None ): # Effectively attention_mask.sum(-1), but not inplace to be 
able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(axis=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) batch_size = attention_mask.shape[0] attention_mask = jnp.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype) # these two operations makes sure that all values # before the output lengths indices are attended to attention_mask = attention_mask.at[jnp.arange(attention_mask.shape[0]), output_lengths - 1].set(1) attention_mask = jnp.flip(jnp.flip(attention_mask, -1).cumsum(-1), -1).astype("bool") return attention_mask @add_start_docstrings( "The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.", WAV_2_VEC_2_START_DOCSTRING, ) class FlaxWav2Vec2Model(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2Module FLAX_WAV2VEC2_MODEL_DOCSTRING = """ Returns: Example: ```python >>> from transformers import Wav2Vec2Processor, FlaxWav2Vec2Model >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-lv60") >>> model = FlaxWav2Vec2Model.from_pretrained("facebook/wav2vec2-large-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor( ... ds["speech"][0], sampling_rate=16_000, return_tensors="np" ... ).input_values # Batch size 1 >>> hidden_states = model(input_values).last_hidden_state ``` """ overwrite_call_docstring( FlaxWav2Vec2Model, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_MODEL_DOCSTRING, ) append_replace_return_docstrings( FlaxWav2Vec2Model, output_type=FlaxWav2Vec2BaseModelOutput, config_class=Wav2Vec2Config ) class FlaxWav2Vec2ForCTCModule(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.final_dropout) self.lm_head = nn.Dense( self.config.vocab_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, deterministic=True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): outputs = self.wav2vec2( input_values, attention_mask=attention_mask, mask_time_indices=mask_time_indices, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, freeze_feature_encoder=freeze_feature_encoder, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.lm_head(hidden_states) if not return_dict: return (logits,) + outputs[2:] return FlaxCausalLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None, ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths @add_start_docstrings( "Wav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).", WAV_2_VEC_2_START_DOCSTRING, ) class FlaxWav2Vec2ForCTC(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2ForCTCModule FLAX_WAV2VEC2_FOR_CTC_DOCSTRING = """ Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import Wav2Vec2Processor, FlaxWav2Vec2ForCTC >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60") >>> model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor( ... ds["speech"][0], sampling_rate=16_000, return_tensors="np" ... ).input_values # Batch size 1 >>> logits = model(input_values).logits >>> predicted_ids = jnp.argmax(logits, axis=-1) >>> transcription = processor.decode(predicted_ids[0]) >>> # should give: "A MAN SAID TO THE UNIVERSE SIR I EXIST" ``` """ overwrite_call_docstring( FlaxWav2Vec2ForCTC, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_CTC_DOCSTRING, ) append_replace_return_docstrings(FlaxWav2Vec2ForCTC, output_type=FlaxCausalLMOutput, config_class=Wav2Vec2Config) class FlaxWav2Vec2ForPreTrainingModule(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype) self.dropout_features = nn.Dropout(self.config.feat_quantizer_dropout) self.quantizer = FlaxWav2Vec2GumbelVectorQuantizer(self.config, dtype=self.dtype) self.project_q = nn.Dense( self.config.proj_codevector_dim, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.project_hid = nn.Dense( self.config.proj_codevector_dim, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, gumbel_temperature: int = 1, deterministic: bool = True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): r""" Returns: Example: ```python ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wav2vec2( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, mask_time_indices=mask_time_indices, deterministic=deterministic, freeze_feature_encoder=freeze_feature_encoder, return_dict=return_dict, ) # project all transformed features (including masked) to final vq dim transformer_features = self.project_hid(outputs[0]) # quantize all (unmasked) extracted features and project to final vq dim extract_features = self.dropout_features(outputs[1], deterministic=deterministic) quantized_features, codevector_perplexity = self.quantizer( extract_features, mask_time_indices, deterministic=deterministic, temperature=gumbel_temperature ) quantized_features = 
self.project_q(quantized_features) if not return_dict: return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return FlaxWav2Vec2ForPreTrainingOutput( projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths @add_start_docstrings("""Wav2Vec2 Model with a quantizer and `VQ` head on top.""", WAV_2_VEC_2_START_DOCSTRING) class FlaxWav2Vec2ForPreTraining(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2ForPreTrainingModule @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) # overwrite since has `gumbel_temperature` input def __call__( self, input_values, attention_mask=None, mask_time_indices=None, gumbel_temperature: int = 1, params: dict = None, dropout_rng: jax.random.PRNGKey = None, gumbel_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, freeze_feature_encoder: bool = False, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_values.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng if gumbel_rng is not None: rngs["gumbel"] = gumbel_rng inputs = {"params": params or self.params} return self.module.apply( inputs, jnp.array(input_values, dtype="f4"), jnp.array(attention_mask, dtype="i4"), mask_time_indices, gumbel_temperature, not train, output_attentions, output_hidden_states, freeze_feature_encoder, return_dict, rngs=rngs, ) FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING = """ Returns: Example: ```python >>> import optax >>> import numpy as np >>> import jax.numpy as jnp >>> from transformers import Wav2Vec2FeatureExtractor, FlaxWav2Vec2ForPreTraining >>> from transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices >>> from datasets import load_dataset >>> import soundfile as sf >>> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-lv60") >>> model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... 
return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = feature_extractor(ds["speech"][0], return_tensors="np").input_values # Batch size 1 >>> # compute masked indices >>> batch_size, raw_sequence_length = input_values.shape >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) >>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) >>> outputs = model(input_values, mask_time_indices=mask_time_indices) >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) >>> cosine_sim = optax.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states) >>> # show that cosine similarity is much higher than random >>> assert np.asarray(cosine_sim)[mask_time_indices].mean() > 0.5 ``` """ overwrite_call_docstring( FlaxWav2Vec2ForPreTraining, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING, ) append_replace_return_docstrings( FlaxWav2Vec2ForPreTraining, output_type=FlaxWav2Vec2ForPreTrainingOutput, config_class=Wav2Vec2Config )
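Both the CTC and the pretraining examples above depend on `_get_feat_extract_output_lengths` to map a raw waveform length to the number of feature frames produced by the convolutional feature encoder. Below is a minimal sketch of that arithmetic; the kernel and stride tuples are an assumption (they match the usual wav2vec2 base/large feature-encoder configuration) rather than values read from a specific checkpoint.

```python
# Minimal sketch of the conv output-length arithmetic used by
# `_get_feat_extract_output_lengths`. The kernel/stride tuples are assumed
# defaults for the wav2vec2 feature encoder, not read from a checkpoint.
conv_kernel = (10, 3, 3, 3, 3, 2, 2)  # assumed config.conv_kernel
conv_stride = (5, 2, 2, 2, 2, 2, 2)   # assumed config.conv_stride


def conv_out_length(input_length: int, kernel_size: int, stride: int) -> int:
    # same 1D convolution output-length formula as in the model code
    return (input_length - kernel_size) // stride + 1


def feat_extract_output_length(num_samples: int) -> int:
    length = num_samples
    for kernel_size, stride in zip(conv_kernel, conv_stride):
        length = conv_out_length(length, kernel_size, stride)
    return length


# one second of 16 kHz audio -> number of frames fed to the Transformer
print(feat_extract_output_length(16_000))  # 49 with the assumed values
```

This is the same quantity the pretraining example computes with `model._get_feat_extract_output_lengths(raw_sequence_length)` before building `mask_time_indices`.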
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/utils/test_versions_utils.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

from transformers.testing_utils import TestCasePlus
from transformers.utils.versions import importlib_metadata, require_version, require_version_core


numpy_ver = importlib_metadata.version("numpy")
python_ver = ".".join([str(x) for x in sys.version_info[:3]])


class DependencyVersionCheckTest(TestCasePlus):
    def test_core(self):
        # lt + different version strings
        require_version_core("numpy<1000.4.5")
        require_version_core("numpy<1000.4")
        require_version_core("numpy<1000")

        # le
        require_version_core("numpy<=1000.4.5")
        require_version_core(f"numpy<={numpy_ver}")

        # eq
        require_version_core(f"numpy=={numpy_ver}")

        # ne
        require_version_core("numpy!=1000.4.5")

        # ge
        require_version_core("numpy>=1.0")
        require_version_core("numpy>=1.0.0")
        require_version_core(f"numpy>={numpy_ver}")

        # gt
        require_version_core("numpy>1.0.0")

        # mix
        require_version_core("numpy>1.0.0,<1000")

        # requirement w/o version
        require_version_core("numpy")

        # unmet requirements due to version conflict
        for req in ["numpy==1.0.0", "numpy>=1000.0.0", f"numpy<{numpy_ver}"]:
            try:
                require_version_core(req)
            except ImportError as e:
                self.assertIn(f"{req} is required", str(e))
                self.assertIn("but found", str(e))

        # unmet requirements due to missing module
        for req in ["numpipypie>1", "numpipypie2"]:
            try:
                require_version_core(req)
            except importlib_metadata.PackageNotFoundError as e:
                self.assertIn(f"The '{req}' distribution was not found and is required by this application", str(e))
                self.assertIn("Try: pip install transformers -U", str(e))

        # bogus requirements formats:
        # 1. whole thing
        for req in ["numpy??1.0.0", "numpy1.0.0"]:
            try:
                require_version_core(req)
            except ValueError as e:
                self.assertIn("requirement needs to be in the pip package format", str(e))
        # 2. only operators
        for req in ["numpy=1.0.0", "numpy == 1.00", "numpy<>1.0.0", "numpy><1.00", "numpy>>1.0.0"]:
            try:
                require_version_core(req)
            except ValueError as e:
                self.assertIn("need one of ", str(e))

    def test_python(self):
        # matching requirement
        require_version("python>=3.6.0")

        # not matching requirements
        for req in ["python>9.9.9", "python<3.0.0"]:
            try:
                require_version_core(req)
            except ImportError as e:
                self.assertIn(f"{req} is required", str(e))
                self.assertIn(f"but found python=={python_ver}", str(e))
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

from transformers.testing_utils import TestCasePlus
from transformers.utils.versions import importlib_metadata, require_version, require_version_core


numpy_ver = importlib_metadata.version("numpy")
python_ver = ".".join([str(x) for x in sys.version_info[:3]])


class DependencyVersionCheckTest(TestCasePlus):
    def test_core(self):
        # lt + different version strings
        require_version_core("numpy<1000.4.5")
        require_version_core("numpy<1000.4")
        require_version_core("numpy<1000")

        # le
        require_version_core("numpy<=1000.4.5")
        require_version_core(f"numpy<={numpy_ver}")

        # eq
        require_version_core(f"numpy=={numpy_ver}")

        # ne
        require_version_core("numpy!=1000.4.5")

        # ge
        require_version_core("numpy>=1.0")
        require_version_core("numpy>=1.0.0")
        require_version_core(f"numpy>={numpy_ver}")

        # gt
        require_version_core("numpy>1.0.0")

        # mix
        require_version_core("numpy>1.0.0,<1000")

        # requirement w/o version
        require_version_core("numpy")

        # unmet requirements due to version conflict
        for req in ["numpy==1.0.0", "numpy>=1000.0.0", f"numpy<{numpy_ver}"]:
            try:
                require_version_core(req)
            except ImportError as e:
                self.assertIn(f"{req} is required", str(e))
                self.assertIn("but found", str(e))

        # unmet requirements due to missing module
        for req in ["numpipypie>1", "numpipypie2"]:
            try:
                require_version_core(req)
            except importlib_metadata.PackageNotFoundError as e:
                self.assertIn(f"The '{req}' distribution was not found and is required by this application", str(e))
                self.assertIn("Try: pip install transformers -U", str(e))

        # bogus requirements formats:
        # 1. whole thing
        for req in ["numpy??1.0.0", "numpy1.0.0"]:
            try:
                require_version_core(req)
            except ValueError as e:
                self.assertIn("requirement needs to be in the pip package format", str(e))
        # 2. only operators
        for req in ["numpy=1.0.0", "numpy == 1.00", "numpy<>1.0.0", "numpy><1.00", "numpy>>1.0.0"]:
            try:
                require_version_core(req)
            except ValueError as e:
                self.assertIn("need one of ", str(e))

    def test_python(self):
        # matching requirement
        require_version("python>=3.6.0")

        # not matching requirements
        for req in ["python>9.9.9", "python<3.0.0"]:
            try:
                require_version_core(req)
            except ImportError as e:
                self.assertIn(f"{req} is required", str(e))
                self.assertIn(f"but found python=={python_ver}", str(e))
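The tests above exercise `require_version` / `require_version_core` with pip-style requirement strings ("numpy>=1.0,<1000", malformed operators, missing packages). As a rough illustration of what such a check involves, here is a small sketch built on the `packaging` library; `check_requirement` is a hypothetical helper written only for this illustration and is not the transformers implementation.

```python
# Illustration only: a hypothetical `check_requirement` helper showing roughly
# what a pip-style requirement check does. Not the transformers implementation.
import re

from packaging import version

OPS = {"<": "lt", "<=": "le", "==": "eq", "!=": "ne", ">=": "ge", ">": "gt"}


def check_requirement(got_version: str, requirement: str) -> None:
    # split "numpy>=1.0,<1000" into the package name and its constraints
    pkg, wants = re.match(r"^([A-Za-z0-9_.\-]+)(.*)$", requirement).groups()
    for want in filter(None, (w.strip() for w in wants.split(","))):
        m = re.match(r"^(<=|>=|==|!=|<|>)(.+)$", want)
        if m is None:
            raise ValueError(f"need one of {list(OPS)}, got: {want!r}")
        op, wanted = m.groups()
        ok = getattr(version.parse(got_version), f"__{OPS[op]}__")(version.parse(wanted))
        if not ok:
            raise ImportError(f"{pkg}{want} is required, but found {pkg}=={got_version}")


check_requirement("1.23.5", "numpy>=1.0,<1000")  # passes silently
# check_requirement("1.23.5", "numpy>=1000.0.0")  # would raise ImportError
```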
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/segformer/modeling_tf_segformer.py
# coding=utf-8 # Copyright 2022 NVIDIA The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorFlow SegFormer model.""" import math from typing import Dict, Optional, Tuple, Union import tensorflow as tf from ...activations_tf import get_tf_activation from ...file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_tf_outputs import TFBaseModelOutput, TFSemanticSegmenterOutput, TFSequenceClassifierOutput from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list, stable_softmax from ...utils import logging from .configuration_segformer import SegformerConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "SegformerConfig" _FEAT_EXTRACTOR_FOR_DOC = "SegformerFeatureExtractor" # Base docstring _CHECKPOINT_FOR_DOC = "nvidia/mit-b0" _EXPECTED_OUTPUT_SHAPE = [1, 256, 16, 16] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "nvidia/mit-b0" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "nvidia/segformer-b0-finetuned-ade-512-512", # See all SegFormer models at https://huggingface.co/models?filter=segformer ] # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->Segformer class TFSegformerDropPath(tf.keras.layers.Layer): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
References: (1) github.com:rwightman/pytorch-image-models """ def __init__(self, drop_path, **kwargs): super().__init__(**kwargs) self.drop_path = drop_path def call(self, x, training=None): if training: keep_prob = 1 - self.drop_path shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) random_tensor = keep_prob + tf.random.uniform(shape, 0, 1) random_tensor = tf.floor(random_tensor) return (x / keep_prob) * random_tensor return x class TFSegformerOverlapPatchEmbeddings(tf.keras.layers.Layer): """Construct the overlapping patch embeddings.""" def __init__(self, patch_size, stride, hidden_size, **kwargs): super().__init__(**kwargs) self.padding = tf.keras.layers.ZeroPadding2D(padding=patch_size // 2) self.proj = tf.keras.layers.Conv2D( filters=hidden_size, kernel_size=patch_size, strides=stride, padding="VALID", name="proj" ) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-05, name="layer_norm") def call(self, pixel_values: tf.Tensor) -> Tuple[tf.Tensor, int, int]: embeddings = self.proj(self.padding(pixel_values)) height = shape_list(embeddings)[1] width = shape_list(embeddings)[2] hidden_dim = shape_list(embeddings)[3] # (batch_size, height, width, num_channels) -> (batch_size, height*width, num_channels) # this can be fed to a Transformer layer embeddings = tf.reshape(embeddings, (-1, height * width, hidden_dim)) embeddings = self.layer_norm(embeddings) return embeddings, height, width class TFSegformerEfficientSelfAttention(tf.keras.layers.Layer): """SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT paper](https://arxiv.org/abs/2102.12122).""" def __init__( self, config: SegformerConfig, hidden_size: int, num_attention_heads: int, sequence_reduction_ratio: int, **kwargs ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads if self.hidden_size % self.num_attention_heads != 0: raise ValueError( f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention " f"heads ({self.num_attention_heads})" ) self.attention_head_size = self.hidden_size // self.num_attention_heads self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = tf.keras.layers.Dense(self.all_head_size, name="query") self.key = tf.keras.layers.Dense(self.all_head_size, name="key") self.value = tf.keras.layers.Dense(self.all_head_size, name="value") self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) self.sr_ratio = sequence_reduction_ratio if sequence_reduction_ratio > 1: self.sr = tf.keras.layers.Conv2D( filters=hidden_size, kernel_size=sequence_reduction_ratio, strides=sequence_reduction_ratio, name="sr" ) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-05, name="layer_norm") def transpose_for_scores(self, tensor: tf.Tensor) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] # to [batch_size, seq_length, num_attention_heads, attention_head_size] batch_size = shape_list(tensor)[0] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] # to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, height: int, width: int, output_attentions: bool = False, training: bool = False, ) -> 
Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]: batch_size = shape_list(hidden_states)[0] num_channels = shape_list(hidden_states)[2] query_layer = self.transpose_for_scores(self.query(hidden_states)) if self.sr_ratio > 1: # Reshape to (batch_size, height, width, num_channels) hidden_states = tf.reshape(hidden_states, (batch_size, height, width, num_channels)) # Apply sequence reduction hidden_states = self.sr(hidden_states) # Reshape back to (batch_size, seq_len, num_channels) hidden_states = tf.reshape(hidden_states, (batch_size, -1, num_channels)) hidden_states = self.layer_norm(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) scale = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, scale) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size)) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class TFSegformerSelfOutput(tf.keras.layers.Layer): def __init__(self, config: SegformerConfig, hidden_size: int, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense(hidden_size, name="dense") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) return hidden_states class TFSegformerAttention(tf.keras.layers.Layer): def __init__( self, config: SegformerConfig, hidden_size: int, num_attention_heads: int, sequence_reduction_ratio: int, **kwargs ): super().__init__(**kwargs) self.self = TFSegformerEfficientSelfAttention( config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio, name="self", ) self.dense_output = TFSegformerSelfOutput(config, hidden_size=hidden_size, name="output") def call( self, hidden_states: tf.Tensor, height: int, width: int, output_attentions: bool = False ) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]: self_outputs = self.self(hidden_states, height, width, output_attentions) attention_output = self.dense_output(self_outputs[0]) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class TFSegformerDWConv(tf.keras.layers.Layer): def __init__(self, dim: int = 768, **kwargs): super().__init__(**kwargs) self.depthwise_convolution = tf.keras.layers.Conv2D( filters=dim, kernel_size=3, strides=1, padding="same", groups=dim, name="dwconv" ) def call(self, hidden_states: tf.Tensor, height: int, width: int) -> tf.Tensor: batch_size = shape_list(hidden_states)[0] num_channels = shape_list(hidden_states)[-1] hidden_states = tf.reshape(hidden_states, (batch_size, height, width, num_channels)) 
hidden_states = self.depthwise_convolution(hidden_states) new_height = shape_list(hidden_states)[1] new_width = shape_list(hidden_states)[2] num_channels = shape_list(hidden_states)[3] hidden_states = tf.reshape(hidden_states, (batch_size, new_height * new_width, num_channels)) return hidden_states class TFSegformerMixFFN(tf.keras.layers.Layer): def __init__( self, config: SegformerConfig, in_features: int, hidden_features: int = None, out_features: int = None, **kwargs ): super().__init__(**kwargs) out_features = out_features or in_features self.dense1 = tf.keras.layers.Dense(hidden_features, name="dense1") self.depthwise_convolution = TFSegformerDWConv(hidden_features, name="dwconv") if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.dense2 = tf.keras.layers.Dense(out_features, name="dense2") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states: tf.Tensor, height: int, width: int, training: bool = False) -> tf.Tensor: hidden_states = self.dense1(hidden_states) hidden_states = self.depthwise_convolution(hidden_states, height, width) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.dense2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) return hidden_states class TFSegformerLayer(tf.keras.layers.Layer): """This corresponds to the Block class in the original implementation.""" def __init__( self, config, hidden_size: int, num_attention_heads: int, drop_path: float, sequence_reduction_ratio: int, mlp_ratio: int, **kwargs ): super().__init__(**kwargs) self.layer_norm_1 = tf.keras.layers.LayerNormalization(epsilon=1e-05, name="layer_norm_1") self.attention = TFSegformerAttention( config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio, name="attention", ) self.drop_path = TFSegformerDropPath(drop_path) if drop_path > 0.0 else tf.keras.layers.Activation("linear") self.layer_norm_2 = tf.keras.layers.LayerNormalization(epsilon=1e-05, name="layer_norm_2") mlp_hidden_size = int(hidden_size * mlp_ratio) self.mlp = TFSegformerMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size, name="mlp") def call( self, hidden_states: tf.Tensor, height: int, width: int, output_attentions: bool = False, training: bool = False, ) -> Tuple: self_attention_outputs = self.attention( self.layer_norm_1(hidden_states), # in Segformer, layernorm is applied before self-attention height, width, output_attentions=output_attentions, training=training, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection (with stochastic depth) attention_output = self.drop_path(attention_output, training=training) hidden_states = attention_output + hidden_states mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width) # second residual connection (with stochastic depth) mlp_output = self.drop_path(mlp_output, training=training) layer_output = mlp_output + hidden_states outputs = (layer_output,) + outputs return outputs class TFSegformerEncoder(tf.keras.layers.Layer): def __init__(self, config: SegformerConfig, **kwargs): super().__init__(**kwargs) self.config = config # stochastic depth decay rule drop_path_decays = [x.numpy() for x in tf.linspace(0.0, 
config.drop_path_rate, sum(config.depths))] # patch embeddings embeddings = [] for i in range(config.num_encoder_blocks): embeddings.append( TFSegformerOverlapPatchEmbeddings( patch_size=config.patch_sizes[i], stride=config.strides[i], hidden_size=config.hidden_sizes[i], name=f"patch_embeddings.{i}", ) ) self.embeddings = embeddings # Transformer blocks blocks = [] cur = 0 for i in range(config.num_encoder_blocks): # each block consists of layers layers = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i]): layers.append( TFSegformerLayer( config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=drop_path_decays[cur + j], sequence_reduction_ratio=config.sr_ratios[i], mlp_ratio=config.mlp_ratios[i], name=f"block.{i}.{j}", ) ) blocks.append(layers) self.block = blocks # Layer norms self.layer_norms = [ tf.keras.layers.LayerNormalization(epsilon=1e-05, name=f"layer_norm.{i}") for i in range(config.num_encoder_blocks) ] def call( self, pixel_values: tf.Tensor, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, training: bool = False, ) -> Union[Tuple, TFBaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None batch_size = shape_list(pixel_values)[0] hidden_states = pixel_values for idx, x in enumerate(zip(self.embeddings, self.block, self.layer_norms)): embedding_layer, block_layer, norm_layer = x # first, obtain patch embeddings hidden_states, height, width = embedding_layer(hidden_states) # second, send embeddings through blocks # (each block consists of multiple layers i.e., list of layers) for i, blk in enumerate(block_layer): layer_outputs = blk( hidden_states, height, width, output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) # third, apply layer norm hidden_states = norm_layer(hidden_states) # fourth, optionally reshape back to (batch_size, height, width, num_channels) if idx != len(self.embeddings) - 1 or (idx == len(self.embeddings) - 1 and self.config.reshape_last_stage): num_channels = shape_list(hidden_states)[-1] hidden_states = tf.reshape(hidden_states, (batch_size, height, width, num_channels)) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions ) @keras_serializable class TFSegformerMainLayer(tf.keras.layers.Layer): config_class = SegformerConfig def __init__(self, config: SegformerConfig, **kwargs): super().__init__(**kwargs) self.config = config # hierarchical Transformer encoder self.encoder = TFSegformerEncoder(config, name="encoder") @unpack_inputs def call( self, pixel_values: tf.Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[Tuple, TFBaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # When 
running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) encoder_outputs = self.encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] # Change to NCHW output format to have uniformity in the modules sequence_output = tf.transpose(sequence_output, perm=[0, 3, 1, 2]) # Change the other hidden state outputs to NCHW as well if output_hidden_states: hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]]) if not return_dict: if tf.greater(len(encoder_outputs[1:]), 0): transposed_encoder_outputs = tuple(tf.transpose(v, perm=[0, 3, 1, 2]) for v in encoder_outputs[1:][0]) return (sequence_output,) + (transposed_encoder_outputs,) else: return (sequence_output,) + encoder_outputs[1:] return TFBaseModelOutput( last_hidden_state=sequence_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TFSegformerPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = SegformerConfig base_model_prefix = "segformer" main_input_name = "pixel_values" @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: """ Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs. """ VISION_DUMMY_INPUTS = tf.random.uniform(shape=(3, self.config.num_channels, 512, 512), dtype=tf.float32) return {"pixel_values": tf.constant(VISION_DUMMY_INPUTS)} @tf.function( input_signature=[ { "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"), } ] ) def serving(self, inputs): """ Method used for serving the model. Args: inputs (`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. """ output = self.call(inputs) return self.serving_output(output) SEGFORMER_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. Parameters: config ([`SegformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ SEGFORMER_INPUTS_DOCSTRING = r""" Args: pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See [`AutoFeatureExtractor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.", SEGFORMER_START_DOCSTRING, ) class TFSegformerModel(TFSegformerPreTrainedModel): def __init__(self, config: SegformerConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config # hierarchical Transformer encoder self.segformer = TFSegformerMainLayer(config, name="segformer") @unpack_inputs @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, pixel_values: tf.Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[Tuple, TFBaseModelOutput]: outputs = self.segformer( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output: TFBaseModelOutput) -> TFBaseModelOutput: # hidden_states and attention not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions return TFBaseModelOutput( last_hidden_state=output.last_hidden_state, hidden_states=output.hidden_states, attentions=output.attentions, ) @add_start_docstrings( """ SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet. 
""", SEGFORMER_START_DOCSTRING, ) class TFSegformerForImageClassification(TFSegformerPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: SegformerConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.segformer = TFSegformerMainLayer(config, name="segformer") # Classifier head self.classifier = tf.keras.layers.Dense(config.num_labels, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: Optional[tf.Tensor] = None, labels: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TFSequenceClassifierOutput]: outputs = self.segformer( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # convert last hidden states to (batch_size, height*width, hidden_size) batch_size = shape_list(sequence_output)[0] sequence_output = tf.transpose(sequence_output, perm=[0, 2, 3, 1]) sequence_output = tf.reshape(sequence_output, (batch_size, -1, self.config.hidden_sizes[-1])) # global average pooling sequence_output = tf.reduce_mean(sequence_output, axis=1) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput: # hidden_states and attention not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions return TFSequenceClassifierOutput( logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions ) class TFSegformerMLP(tf.keras.layers.Layer): """ Linear Embedding. 
""" def __init__(self, config: SegformerConfig, **kwargs): super().__init__(**kwargs) self.proj = tf.keras.layers.Dense(config.decoder_hidden_size, name="proj") def call(self, hidden_states: tf.Tensor) -> tf.Tensor: height = shape_list(hidden_states)[1] width = shape_list(hidden_states)[2] hidden_dim = shape_list(hidden_states)[-1] hidden_states = tf.reshape(hidden_states, (-1, height * width, hidden_dim)) hidden_states = self.proj(hidden_states) return hidden_states class TFSegformerDecodeHead(TFSegformerPreTrainedModel): def __init__(self, config: SegformerConfig, **kwargs): super().__init__(config, **kwargs) # linear layers which will unify the channel dimension of each of the encoder blocks to the same config.decoder_hidden_size mlps = [] for i in range(config.num_encoder_blocks): mlp = TFSegformerMLP(config, name=f"linear_c.{i}") mlps.append(mlp) self.mlps = mlps # the following 3 layers implement the ConvModule of the original implementation self.linear_fuse = tf.keras.layers.Conv2D( filters=config.decoder_hidden_size, kernel_size=1, use_bias=False, name="linear_fuse" ) self.batch_norm = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="batch_norm") self.activation = tf.keras.layers.Activation("relu") self.dropout = tf.keras.layers.Dropout(config.classifier_dropout_prob) self.classifier = tf.keras.layers.Conv2D(filters=config.num_labels, kernel_size=1, name="classifier") self.config = config def call(self, encoder_hidden_states, training: bool = False): batch_size = shape_list(encoder_hidden_states[-1])[0] all_hidden_states = () for encoder_hidden_state, mlp in zip(encoder_hidden_states, self.mlps): if self.config.reshape_last_stage is False and len(shape_list(encoder_hidden_state)) == 3: height = tf.math.sqrt(tf.cast(shape_list(encoder_hidden_state)[1], tf.float32)) height = width = tf.cast(height, tf.int32) encoder_hidden_state = tf.reshape(encoder_hidden_state, (batch_size, height, width, -1)) # unify channel dimension encoder_hidden_state = tf.transpose(encoder_hidden_state, perm=[0, 2, 3, 1]) height = shape_list(encoder_hidden_state)[1] width = shape_list(encoder_hidden_state)[2] encoder_hidden_state = mlp(encoder_hidden_state) encoder_hidden_state = tf.reshape(encoder_hidden_state, (batch_size, height, width, -1)) # upsample temp_state = tf.transpose(encoder_hidden_states[0], perm=[0, 2, 3, 1]) upsample_resolution = shape_list(temp_state)[1:-1] encoder_hidden_state = tf.image.resize(encoder_hidden_state, size=upsample_resolution, method="bilinear") all_hidden_states += (encoder_hidden_state,) hidden_states = self.linear_fuse(tf.concat(all_hidden_states[::-1], axis=-1)) hidden_states = self.batch_norm(hidden_states, training=training) hidden_states = self.activation(hidden_states) hidden_states = self.dropout(hidden_states, training=training) # logits of shape (batch_size, height/4, width/4, num_labels) logits = self.classifier(hidden_states) return logits @add_start_docstrings( """SegFormer Model transformer with an all-MLP decode head on top e.g. 
for ADE20k, CityScapes.""", SEGFORMER_START_DOCSTRING, ) class TFSegformerForSemanticSegmentation(TFSegformerPreTrainedModel): def __init__(self, config: SegformerConfig, **kwargs): super().__init__(config, **kwargs) self.segformer = TFSegformerMainLayer(config, name="segformer") self.decode_head = TFSegformerDecodeHead(config, name="decode_head") def hf_compute_loss(self, logits, labels): # upsample logits to the images' original size # `labels` is of shape (batch_size, height, width) label_interp_shape = shape_list(labels)[1:] upsampled_logits = tf.image.resize(logits, size=label_interp_shape, method="bilinear") # compute weighted loss loss_fct = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none") def masked_loss(real, pred): unmasked_loss = loss_fct(real, pred) mask = tf.cast(real != self.config.semantic_loss_ignore_index, dtype=unmasked_loss.dtype) masked_loss = unmasked_loss * mask # Reduction strategy in the similar spirit with # https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_utils.py#L210 reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(mask) return tf.reshape(reduced_masked_loss, (1,)) return masked_loss(labels, upsampled_logits) @unpack_inputs @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFSemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) def call( self, pixel_values: tf.Tensor, labels: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TFSemanticSegmenterOutput]: r""" labels (`tf.Tensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a (per-pixel) classification loss is computed (Cross-Entropy). 
Returns: Examples: ```python >>> from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") >>> model = TFSegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") >>> inputs = feature_extractor(images=image, return_tensors="tf") >>> outputs = model(**inputs, training=False) >>> # logits are of shape (batch_size, num_labels, height/4, width/4) >>> logits = outputs.logits >>> list(logits.shape) [1, 150, 128, 128] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.segformer( pixel_values, output_attentions=output_attentions, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] logits = self.decode_head(encoder_hidden_states) loss = None if labels is not None: if not self.config.num_labels > 1: raise ValueError("The number of labels should be greater than one") else: loss = self.hf_compute_loss(logits=logits, labels=labels) # make logits of shape (batch_size, num_labels, height, width) to # keep them consistent across APIs logits = tf.transpose(logits, perm=[0, 3, 1, 2]) if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) def serving_output(self, output: TFSemanticSegmenterOutput) -> TFSemanticSegmenterOutput: # hidden_states and attention not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions return TFSemanticSegmenterOutput( logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions )
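The SegFormer model file reproduced here implements stochastic depth in `TFSegformerDropPath`: during training, each sample's residual branch is either kept (and rescaled by 1/keep_prob) or zeroed out entirely. The sketch below isolates that computation so its effect is easier to see; the drop probability and the toy tensor shape are made-up demo values, and the helper assumes eager execution with a statically known batch size.

```python
# Self-contained sketch of the stochastic-depth ("drop path") computation used
# by TFSegformerDropPath. Demo values are made up.
import tensorflow as tf


def drop_path(x: tf.Tensor, drop_prob: float, training: bool) -> tf.Tensor:
    if not training or drop_prob == 0.0:
        return x
    keep_prob = 1.0 - drop_prob
    # one Bernoulli draw per sample, broadcast over the remaining dimensions
    # (assumes eager execution with a statically known batch size)
    shape = (x.shape[0],) + (1,) * (len(x.shape) - 1)
    random_tensor = tf.floor(keep_prob + tf.random.uniform(shape, 0, 1))  # 0.0 or 1.0
    return (x / keep_prob) * random_tensor  # rescaling keeps the expectation unchanged


x = tf.ones((4, 16, 32))  # (batch, seq_len, hidden)
out = drop_path(x, drop_prob=0.1, training=True)
print(out.shape)  # (4, 16, 32); roughly 10% of the samples come out all zeros
```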
# coding=utf-8 # Copyright 2022 NVIDIA The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorFlow SegFormer model.""" import math from typing import Dict, Optional, Tuple, Union import tensorflow as tf from ...activations_tf import get_tf_activation from ...file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_tf_outputs import TFBaseModelOutput, TFSemanticSegmenterOutput, TFSequenceClassifierOutput from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list, stable_softmax from ...utils import logging from .configuration_segformer import SegformerConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "SegformerConfig" _FEAT_EXTRACTOR_FOR_DOC = "SegformerFeatureExtractor" # Base docstring _CHECKPOINT_FOR_DOC = "nvidia/mit-b0" _EXPECTED_OUTPUT_SHAPE = [1, 256, 16, 16] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "nvidia/mit-b0" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "nvidia/segformer-b0-finetuned-ade-512-512", # See all SegFormer models at https://huggingface.co/models?filter=segformer ] # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->Segformer class TFSegformerDropPath(tf.keras.layers.Layer): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
References: (1) github.com:rwightman/pytorch-image-models """ def __init__(self, drop_path, **kwargs): super().__init__(**kwargs) self.drop_path = drop_path def call(self, x, training=None): if training: keep_prob = 1 - self.drop_path shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) random_tensor = keep_prob + tf.random.uniform(shape, 0, 1) random_tensor = tf.floor(random_tensor) return (x / keep_prob) * random_tensor return x class TFSegformerOverlapPatchEmbeddings(tf.keras.layers.Layer): """Construct the overlapping patch embeddings.""" def __init__(self, patch_size, stride, hidden_size, **kwargs): super().__init__(**kwargs) self.padding = tf.keras.layers.ZeroPadding2D(padding=patch_size // 2) self.proj = tf.keras.layers.Conv2D( filters=hidden_size, kernel_size=patch_size, strides=stride, padding="VALID", name="proj" ) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-05, name="layer_norm") def call(self, pixel_values: tf.Tensor) -> Tuple[tf.Tensor, int, int]: embeddings = self.proj(self.padding(pixel_values)) height = shape_list(embeddings)[1] width = shape_list(embeddings)[2] hidden_dim = shape_list(embeddings)[3] # (batch_size, height, width, num_channels) -> (batch_size, height*width, num_channels) # this can be fed to a Transformer layer embeddings = tf.reshape(embeddings, (-1, height * width, hidden_dim)) embeddings = self.layer_norm(embeddings) return embeddings, height, width class TFSegformerEfficientSelfAttention(tf.keras.layers.Layer): """SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT paper](https://arxiv.org/abs/2102.12122).""" def __init__( self, config: SegformerConfig, hidden_size: int, num_attention_heads: int, sequence_reduction_ratio: int, **kwargs ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads if self.hidden_size % self.num_attention_heads != 0: raise ValueError( f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention " f"heads ({self.num_attention_heads})" ) self.attention_head_size = self.hidden_size // self.num_attention_heads self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = tf.keras.layers.Dense(self.all_head_size, name="query") self.key = tf.keras.layers.Dense(self.all_head_size, name="key") self.value = tf.keras.layers.Dense(self.all_head_size, name="value") self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) self.sr_ratio = sequence_reduction_ratio if sequence_reduction_ratio > 1: self.sr = tf.keras.layers.Conv2D( filters=hidden_size, kernel_size=sequence_reduction_ratio, strides=sequence_reduction_ratio, name="sr" ) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-05, name="layer_norm") def transpose_for_scores(self, tensor: tf.Tensor) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] # to [batch_size, seq_length, num_attention_heads, attention_head_size] batch_size = shape_list(tensor)[0] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] # to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, height: int, width: int, output_attentions: bool = False, training: bool = False, ) -> 
Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]: batch_size = shape_list(hidden_states)[0] num_channels = shape_list(hidden_states)[2] query_layer = self.transpose_for_scores(self.query(hidden_states)) if self.sr_ratio > 1: # Reshape to (batch_size, height, width, num_channels) hidden_states = tf.reshape(hidden_states, (batch_size, height, width, num_channels)) # Apply sequence reduction hidden_states = self.sr(hidden_states) # Reshape back to (batch_size, seq_len, num_channels) hidden_states = tf.reshape(hidden_states, (batch_size, -1, num_channels)) hidden_states = self.layer_norm(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) scale = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, scale) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size)) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class TFSegformerSelfOutput(tf.keras.layers.Layer): def __init__(self, config: SegformerConfig, hidden_size: int, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense(hidden_size, name="dense") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) return hidden_states class TFSegformerAttention(tf.keras.layers.Layer): def __init__( self, config: SegformerConfig, hidden_size: int, num_attention_heads: int, sequence_reduction_ratio: int, **kwargs ): super().__init__(**kwargs) self.self = TFSegformerEfficientSelfAttention( config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio, name="self", ) self.dense_output = TFSegformerSelfOutput(config, hidden_size=hidden_size, name="output") def call( self, hidden_states: tf.Tensor, height: int, width: int, output_attentions: bool = False ) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]: self_outputs = self.self(hidden_states, height, width, output_attentions) attention_output = self.dense_output(self_outputs[0]) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class TFSegformerDWConv(tf.keras.layers.Layer): def __init__(self, dim: int = 768, **kwargs): super().__init__(**kwargs) self.depthwise_convolution = tf.keras.layers.Conv2D( filters=dim, kernel_size=3, strides=1, padding="same", groups=dim, name="dwconv" ) def call(self, hidden_states: tf.Tensor, height: int, width: int) -> tf.Tensor: batch_size = shape_list(hidden_states)[0] num_channels = shape_list(hidden_states)[-1] hidden_states = tf.reshape(hidden_states, (batch_size, height, width, num_channels)) 
hidden_states = self.depthwise_convolution(hidden_states) new_height = shape_list(hidden_states)[1] new_width = shape_list(hidden_states)[2] num_channels = shape_list(hidden_states)[3] hidden_states = tf.reshape(hidden_states, (batch_size, new_height * new_width, num_channels)) return hidden_states class TFSegformerMixFFN(tf.keras.layers.Layer): def __init__( self, config: SegformerConfig, in_features: int, hidden_features: int = None, out_features: int = None, **kwargs ): super().__init__(**kwargs) out_features = out_features or in_features self.dense1 = tf.keras.layers.Dense(hidden_features, name="dense1") self.depthwise_convolution = TFSegformerDWConv(hidden_features, name="dwconv") if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.dense2 = tf.keras.layers.Dense(out_features, name="dense2") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states: tf.Tensor, height: int, width: int, training: bool = False) -> tf.Tensor: hidden_states = self.dense1(hidden_states) hidden_states = self.depthwise_convolution(hidden_states, height, width) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.dense2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) return hidden_states class TFSegformerLayer(tf.keras.layers.Layer): """This corresponds to the Block class in the original implementation.""" def __init__( self, config, hidden_size: int, num_attention_heads: int, drop_path: float, sequence_reduction_ratio: int, mlp_ratio: int, **kwargs ): super().__init__(**kwargs) self.layer_norm_1 = tf.keras.layers.LayerNormalization(epsilon=1e-05, name="layer_norm_1") self.attention = TFSegformerAttention( config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio, name="attention", ) self.drop_path = TFSegformerDropPath(drop_path) if drop_path > 0.0 else tf.keras.layers.Activation("linear") self.layer_norm_2 = tf.keras.layers.LayerNormalization(epsilon=1e-05, name="layer_norm_2") mlp_hidden_size = int(hidden_size * mlp_ratio) self.mlp = TFSegformerMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size, name="mlp") def call( self, hidden_states: tf.Tensor, height: int, width: int, output_attentions: bool = False, training: bool = False, ) -> Tuple: self_attention_outputs = self.attention( self.layer_norm_1(hidden_states), # in Segformer, layernorm is applied before self-attention height, width, output_attentions=output_attentions, training=training, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection (with stochastic depth) attention_output = self.drop_path(attention_output, training=training) hidden_states = attention_output + hidden_states mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width) # second residual connection (with stochastic depth) mlp_output = self.drop_path(mlp_output, training=training) layer_output = mlp_output + hidden_states outputs = (layer_output,) + outputs return outputs class TFSegformerEncoder(tf.keras.layers.Layer): def __init__(self, config: SegformerConfig, **kwargs): super().__init__(**kwargs) self.config = config # stochastic depth decay rule drop_path_decays = [x.numpy() for x in tf.linspace(0.0, 
config.drop_path_rate, sum(config.depths))] # patch embeddings embeddings = [] for i in range(config.num_encoder_blocks): embeddings.append( TFSegformerOverlapPatchEmbeddings( patch_size=config.patch_sizes[i], stride=config.strides[i], hidden_size=config.hidden_sizes[i], name=f"patch_embeddings.{i}", ) ) self.embeddings = embeddings # Transformer blocks blocks = [] cur = 0 for i in range(config.num_encoder_blocks): # each block consists of layers layers = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i]): layers.append( TFSegformerLayer( config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=drop_path_decays[cur + j], sequence_reduction_ratio=config.sr_ratios[i], mlp_ratio=config.mlp_ratios[i], name=f"block.{i}.{j}", ) ) blocks.append(layers) self.block = blocks # Layer norms self.layer_norms = [ tf.keras.layers.LayerNormalization(epsilon=1e-05, name=f"layer_norm.{i}") for i in range(config.num_encoder_blocks) ] def call( self, pixel_values: tf.Tensor, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, training: bool = False, ) -> Union[Tuple, TFBaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None batch_size = shape_list(pixel_values)[0] hidden_states = pixel_values for idx, x in enumerate(zip(self.embeddings, self.block, self.layer_norms)): embedding_layer, block_layer, norm_layer = x # first, obtain patch embeddings hidden_states, height, width = embedding_layer(hidden_states) # second, send embeddings through blocks # (each block consists of multiple layers i.e., list of layers) for i, blk in enumerate(block_layer): layer_outputs = blk( hidden_states, height, width, output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) # third, apply layer norm hidden_states = norm_layer(hidden_states) # fourth, optionally reshape back to (batch_size, height, width, num_channels) if idx != len(self.embeddings) - 1 or (idx == len(self.embeddings) - 1 and self.config.reshape_last_stage): num_channels = shape_list(hidden_states)[-1] hidden_states = tf.reshape(hidden_states, (batch_size, height, width, num_channels)) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions ) @keras_serializable class TFSegformerMainLayer(tf.keras.layers.Layer): config_class = SegformerConfig def __init__(self, config: SegformerConfig, **kwargs): super().__init__(**kwargs) self.config = config # hierarchical Transformer encoder self.encoder = TFSegformerEncoder(config, name="encoder") @unpack_inputs def call( self, pixel_values: tf.Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[Tuple, TFBaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # When 
running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) encoder_outputs = self.encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] # Change to NCHW output format to have uniformity in the modules sequence_output = tf.transpose(sequence_output, perm=[0, 3, 1, 2]) # Change the other hidden state outputs to NCHW as well if output_hidden_states: hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]]) if not return_dict: if tf.greater(len(encoder_outputs[1:]), 0): transposed_encoder_outputs = tuple(tf.transpose(v, perm=[0, 3, 1, 2]) for v in encoder_outputs[1:][0]) return (sequence_output,) + (transposed_encoder_outputs,) else: return (sequence_output,) + encoder_outputs[1:] return TFBaseModelOutput( last_hidden_state=sequence_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TFSegformerPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = SegformerConfig base_model_prefix = "segformer" main_input_name = "pixel_values" @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: """ Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs. """ VISION_DUMMY_INPUTS = tf.random.uniform(shape=(3, self.config.num_channels, 512, 512), dtype=tf.float32) return {"pixel_values": tf.constant(VISION_DUMMY_INPUTS)} @tf.function( input_signature=[ { "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"), } ] ) def serving(self, inputs): """ Method used for serving the model. Args: inputs (`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. """ output = self.call(inputs) return self.serving_output(output) SEGFORMER_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. Parameters: config ([`SegformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ SEGFORMER_INPUTS_DOCSTRING = r""" Args: pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See [`AutoFeatureExtractor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.", SEGFORMER_START_DOCSTRING, ) class TFSegformerModel(TFSegformerPreTrainedModel): def __init__(self, config: SegformerConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config # hierarchical Transformer encoder self.segformer = TFSegformerMainLayer(config, name="segformer") @unpack_inputs @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, pixel_values: tf.Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[Tuple, TFBaseModelOutput]: outputs = self.segformer( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output: TFBaseModelOutput) -> TFBaseModelOutput: # hidden_states and attention not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions return TFBaseModelOutput( last_hidden_state=output.last_hidden_state, hidden_states=output.hidden_states, attentions=output.attentions, ) @add_start_docstrings( """ SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet. 
""", SEGFORMER_START_DOCSTRING, ) class TFSegformerForImageClassification(TFSegformerPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: SegformerConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.segformer = TFSegformerMainLayer(config, name="segformer") # Classifier head self.classifier = tf.keras.layers.Dense(config.num_labels, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: Optional[tf.Tensor] = None, labels: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TFSequenceClassifierOutput]: outputs = self.segformer( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # convert last hidden states to (batch_size, height*width, hidden_size) batch_size = shape_list(sequence_output)[0] sequence_output = tf.transpose(sequence_output, perm=[0, 2, 3, 1]) sequence_output = tf.reshape(sequence_output, (batch_size, -1, self.config.hidden_sizes[-1])) # global average pooling sequence_output = tf.reduce_mean(sequence_output, axis=1) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput: # hidden_states and attention not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions return TFSequenceClassifierOutput( logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions ) class TFSegformerMLP(tf.keras.layers.Layer): """ Linear Embedding. 
""" def __init__(self, config: SegformerConfig, **kwargs): super().__init__(**kwargs) self.proj = tf.keras.layers.Dense(config.decoder_hidden_size, name="proj") def call(self, hidden_states: tf.Tensor) -> tf.Tensor: height = shape_list(hidden_states)[1] width = shape_list(hidden_states)[2] hidden_dim = shape_list(hidden_states)[-1] hidden_states = tf.reshape(hidden_states, (-1, height * width, hidden_dim)) hidden_states = self.proj(hidden_states) return hidden_states class TFSegformerDecodeHead(TFSegformerPreTrainedModel): def __init__(self, config: SegformerConfig, **kwargs): super().__init__(config, **kwargs) # linear layers which will unify the channel dimension of each of the encoder blocks to the same config.decoder_hidden_size mlps = [] for i in range(config.num_encoder_blocks): mlp = TFSegformerMLP(config, name=f"linear_c.{i}") mlps.append(mlp) self.mlps = mlps # the following 3 layers implement the ConvModule of the original implementation self.linear_fuse = tf.keras.layers.Conv2D( filters=config.decoder_hidden_size, kernel_size=1, use_bias=False, name="linear_fuse" ) self.batch_norm = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="batch_norm") self.activation = tf.keras.layers.Activation("relu") self.dropout = tf.keras.layers.Dropout(config.classifier_dropout_prob) self.classifier = tf.keras.layers.Conv2D(filters=config.num_labels, kernel_size=1, name="classifier") self.config = config def call(self, encoder_hidden_states, training: bool = False): batch_size = shape_list(encoder_hidden_states[-1])[0] all_hidden_states = () for encoder_hidden_state, mlp in zip(encoder_hidden_states, self.mlps): if self.config.reshape_last_stage is False and len(shape_list(encoder_hidden_state)) == 3: height = tf.math.sqrt(tf.cast(shape_list(encoder_hidden_state)[1], tf.float32)) height = width = tf.cast(height, tf.int32) encoder_hidden_state = tf.reshape(encoder_hidden_state, (batch_size, height, width, -1)) # unify channel dimension encoder_hidden_state = tf.transpose(encoder_hidden_state, perm=[0, 2, 3, 1]) height = shape_list(encoder_hidden_state)[1] width = shape_list(encoder_hidden_state)[2] encoder_hidden_state = mlp(encoder_hidden_state) encoder_hidden_state = tf.reshape(encoder_hidden_state, (batch_size, height, width, -1)) # upsample temp_state = tf.transpose(encoder_hidden_states[0], perm=[0, 2, 3, 1]) upsample_resolution = shape_list(temp_state)[1:-1] encoder_hidden_state = tf.image.resize(encoder_hidden_state, size=upsample_resolution, method="bilinear") all_hidden_states += (encoder_hidden_state,) hidden_states = self.linear_fuse(tf.concat(all_hidden_states[::-1], axis=-1)) hidden_states = self.batch_norm(hidden_states, training=training) hidden_states = self.activation(hidden_states) hidden_states = self.dropout(hidden_states, training=training) # logits of shape (batch_size, height/4, width/4, num_labels) logits = self.classifier(hidden_states) return logits @add_start_docstrings( """SegFormer Model transformer with an all-MLP decode head on top e.g. 
for ADE20k, CityScapes.""", SEGFORMER_START_DOCSTRING, ) class TFSegformerForSemanticSegmentation(TFSegformerPreTrainedModel): def __init__(self, config: SegformerConfig, **kwargs): super().__init__(config, **kwargs) self.segformer = TFSegformerMainLayer(config, name="segformer") self.decode_head = TFSegformerDecodeHead(config, name="decode_head") def hf_compute_loss(self, logits, labels): # upsample logits to the images' original size # `labels` is of shape (batch_size, height, width) label_interp_shape = shape_list(labels)[1:] upsampled_logits = tf.image.resize(logits, size=label_interp_shape, method="bilinear") # compute weighted loss loss_fct = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none") def masked_loss(real, pred): unmasked_loss = loss_fct(real, pred) mask = tf.cast(real != self.config.semantic_loss_ignore_index, dtype=unmasked_loss.dtype) masked_loss = unmasked_loss * mask # Reduction strategy in the similar spirit with # https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_utils.py#L210 reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(mask) return tf.reshape(reduced_masked_loss, (1,)) return masked_loss(labels, upsampled_logits) @unpack_inputs @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFSemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) def call( self, pixel_values: tf.Tensor, labels: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TFSemanticSegmenterOutput]: r""" labels (`tf.Tensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a (per-pixel) classification loss is computed (Cross-Entropy). 
Returns: Examples: ```python >>> from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") >>> model = TFSegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") >>> inputs = feature_extractor(images=image, return_tensors="tf") >>> outputs = model(**inputs, training=False) >>> # logits are of shape (batch_size, num_labels, height/4, width/4) >>> logits = outputs.logits >>> list(logits.shape) [1, 150, 128, 128] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.segformer( pixel_values, output_attentions=output_attentions, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] logits = self.decode_head(encoder_hidden_states) loss = None if labels is not None: if not self.config.num_labels > 1: raise ValueError("The number of labels should be greater than one") else: loss = self.hf_compute_loss(logits=logits, labels=labels) # make logits of shape (batch_size, num_labels, height, width) to # keep them consistent across APIs logits = tf.transpose(logits, perm=[0, 3, 1, 2]) if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) def serving_output(self, output: TFSemanticSegmenterOutput) -> TFSemanticSegmenterOutput: # hidden_states and attention not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions return TFSemanticSegmenterOutput( logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions )
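A minimal, self-contained sketch of the masked loss reduction that `hf_compute_loss` above applies for semantic segmentation: a per-pixel sparse cross-entropy averaged only over pixels whose label differs from the ignore index. The ignore value of 255 and the clamping of ignored labels to class 0 before the loss op are assumptions of this sketch, not behaviour taken from the class itself.

```python
import tensorflow as tf

IGNORE_INDEX = 255  # assumed stand-in for `config.semantic_loss_ignore_index`


def masked_sparse_ce(labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
    """labels: (batch, height, width) int32; logits: (batch, height, width, num_labels)."""
    loss_fct = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none")
    # Clamp ignored labels to a valid class id so the loss op stays well defined,
    # then zero out their contribution with the mask below (the clamp is an extra
    # safety step for this sketch; the class above relies on the mask alone).
    safe_labels = tf.where(labels == IGNORE_INDEX, tf.zeros_like(labels), labels)
    per_pixel_loss = loss_fct(safe_labels, logits)  # shape (batch, height, width)
    mask = tf.cast(labels != IGNORE_INDEX, per_pixel_loss.dtype)
    # Average only over the pixels that carry a real label.
    return tf.reduce_sum(per_pixel_loss * mask) / tf.reduce_sum(mask)


labels = tf.constant([[[1, IGNORE_INDEX], [0, 2]]], dtype=tf.int32)  # (1, 2, 2)
logits = tf.random.normal((1, 2, 2, 3))                              # 3 classes
print(masked_sparse_ce(labels, logits))  # scalar tensor
```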
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/utils/test_skip_decorators.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # # this test validates that we can stack skip decorators in groups and whether # they work correctly with other decorators # # since the decorators have already built their decision params (like checking # env[], we can't mock the env and test each of the combinations), so ideally # the following 4 should be run. But since we have different CI jobs running # different configs, all combinations should get covered # # RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py # RUN_SLOW=1 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py # RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py # RUN_SLOW=0 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py import os import unittest import pytest from parameterized import parameterized from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device # skipping in unittest tests params = [(1,)] # test that we can stack our skip decorators with 3rd party decorators def check_slow(): run_slow = bool(os.getenv("RUN_SLOW", 0)) if run_slow: assert True else: assert False, "should have been skipped" # test that we can stack our skip decorators def check_slow_torch_cuda(): run_slow = bool(os.getenv("RUN_SLOW", 0)) if run_slow and torch_device == "cuda": assert True else: assert False, "should have been skipped" @require_torch class SkipTester(unittest.TestCase): @slow @require_torch_gpu def test_2_skips_slow_first(self): check_slow_torch_cuda() @require_torch_gpu @slow def test_2_skips_slow_last(self): check_slow_torch_cuda() # The combination of any skip decorator, followed by parameterized fails to skip the tests # 1. @slow manages to correctly skip `test_param_slow_first` # 2. but then `parameterized` creates new tests, with a unique name for each parameter groups. # It has no idea that they are to be skipped and so they all run, ignoring @slow # Therefore skip decorators must come after `parameterized` # # @slow # @parameterized.expand(params) # def test_param_slow_first(self, param=None): # check_slow() # This works as expected: # 1. `parameterized` creates new tests with unique names # 2. each of them gets an opportunity to be skipped @parameterized.expand(params) @slow def test_param_slow_last(self, param=None): check_slow() # skipping in non-unittest tests # no problem at all here @slow @require_torch_gpu def test_pytest_2_skips_slow_first(): check_slow_torch_cuda() @require_torch_gpu @slow def test_pytest_2_skips_slow_last(): check_slow_torch_cuda() @slow @pytest.mark.parametrize("param", [1]) def test_pytest_param_slow_first(param): check_slow() @pytest.mark.parametrize("param", [1]) @slow def test_pytest_param_slow_last(param): check_slow()
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # # this test validates that we can stack skip decorators in groups and whether # they work correctly with other decorators # # since the decorators have already built their decision params (like checking # env[], we can't mock the env and test each of the combinations), so ideally # the following 4 should be run. But since we have different CI jobs running # different configs, all combinations should get covered # # RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py # RUN_SLOW=1 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py # RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py # RUN_SLOW=0 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py import os import unittest import pytest from parameterized import parameterized from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device # skipping in unittest tests params = [(1,)] # test that we can stack our skip decorators with 3rd party decorators def check_slow(): run_slow = bool(os.getenv("RUN_SLOW", 0)) if run_slow: assert True else: assert False, "should have been skipped" # test that we can stack our skip decorators def check_slow_torch_cuda(): run_slow = bool(os.getenv("RUN_SLOW", 0)) if run_slow and torch_device == "cuda": assert True else: assert False, "should have been skipped" @require_torch class SkipTester(unittest.TestCase): @slow @require_torch_gpu def test_2_skips_slow_first(self): check_slow_torch_cuda() @require_torch_gpu @slow def test_2_skips_slow_last(self): check_slow_torch_cuda() # The combination of any skip decorator, followed by parameterized fails to skip the tests # 1. @slow manages to correctly skip `test_param_slow_first` # 2. but then `parameterized` creates new tests, with a unique name for each parameter groups. # It has no idea that they are to be skipped and so they all run, ignoring @slow # Therefore skip decorators must come after `parameterized` # # @slow # @parameterized.expand(params) # def test_param_slow_first(self, param=None): # check_slow() # This works as expected: # 1. `parameterized` creates new tests with unique names # 2. each of them gets an opportunity to be skipped @parameterized.expand(params) @slow def test_param_slow_last(self, param=None): check_slow() # skipping in non-unittest tests # no problem at all here @slow @require_torch_gpu def test_pytest_2_skips_slow_first(): check_slow_torch_cuda() @require_torch_gpu @slow def test_pytest_2_skips_slow_last(): check_slow_torch_cuda() @slow @pytest.mark.parametrize("param", [1]) def test_pytest_param_slow_first(param): check_slow() @pytest.mark.parametrize("param", [1]) @slow def test_pytest_param_slow_last(param): check_slow()
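A minimal sketch of the decorator-ordering rule this test module documents, using only `unittest.skipUnless` and `parameterized`; the class name, test names, and the RUN_SLOW convention used here are made up for the illustration.

```python
import os
import unittest

from parameterized import parameterized

run_slow = os.getenv("RUN_SLOW", "0") == "1"  # assumption: only "1" enables slow tests


class DecoratorOrderingExample(unittest.TestCase):
    # Works: `parameterized.expand` sits on top, so the skip decorator is already
    # attached to the function it copies, and every generated test can be skipped.
    @parameterized.expand([(1,), (2,)])
    @unittest.skipUnless(run_slow, "slow test, set RUN_SLOW=1 to run")
    def test_param_then_skip(self, param):
        self.assertGreater(param, 0)

    # Broken (kept commented out on purpose): with the skip decorator on top,
    # `parameterized` generates fresh test methods underneath it that no longer
    # carry the skip marker, so they run even when RUN_SLOW is unset.
    #
    # @unittest.skipUnless(run_slow, "slow test, set RUN_SLOW=1 to run")
    # @parameterized.expand([(1,), (2,)])
    # def test_skip_then_param(self, param):
    #     ...


if __name__ == "__main__":
    unittest.main()
```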
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/vit/image_processing_vit.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for ViT.""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import logging logger = logging.get_logger(__name__) class ViTImageProcessor(BaseImageProcessor): r""" Constructs a ViT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `(size["height"], size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 224, "width": 224} size = get_size_dict(size) self.do_resize = do_resize self.do_rescale = do_rescale self.do_normalize = do_normalize self.size = size self.resample = resample self.rescale_factor = rescale_factor self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample: `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") return resize( image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs ) def rescale( self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray: """ Rescale an image by a scale factor. image = image * scale. Args: image (`np.ndarray`): Image to rescale. scale (`float`): The scaling factor to rescale pixel values by. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The rescaled image. """ return rescale(image, scale=scale, data_format=data_format, **kwargs) def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray: """ Normalize an image. image = (image - image_mean) / image_std. Args: image (`np.ndarray`): Image to normalize. mean (`float` or `List[float]`): Image mean to use for normalization. std (`float` or `List[float]`): Image standard deviation to use for normalization. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. 
If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The normalized image. """ return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs) def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after resizing. resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. 
""" do_resize = do_resize if do_resize is not None else self.do_resize do_rescale = do_rescale if do_rescale is not None else self.do_rescale do_normalize = do_normalize if do_normalize is not None else self.do_normalize resample = resample if resample is not None else self.resample rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size_dict = get_size_dict(size) if not is_batched(images): images = [images] if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_resize: images = [self.resize(image=image, size=size_dict, resample=resample) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images] images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for ViT.""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import logging logger = logging.get_logger(__name__) class ViTImageProcessor(BaseImageProcessor): r""" Constructs a ViT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `(size["height"], size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 224, "width": 224} size = get_size_dict(size) self.do_resize = do_resize self.do_rescale = do_rescale self.do_normalize = do_normalize self.size = size self.resample = resample self.rescale_factor = rescale_factor self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample: `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") return resize( image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs ) def rescale( self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray: """ Rescale an image by a scale factor. image = image * scale. Args: image (`np.ndarray`): Image to rescale. scale (`float`): The scaling factor to rescale pixel values by. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The rescaled image. """ return rescale(image, scale=scale, data_format=data_format, **kwargs) def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray: """ Normalize an image. image = (image - image_mean) / image_std. Args: image (`np.ndarray`): Image to normalize. mean (`float` or `List[float]`): Image mean to use for normalization. std (`float` or `List[float]`): Image standard deviation to use for normalization. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. 
If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The normalized image. """ return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs) def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after resizing. resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. 
""" do_resize = do_resize if do_resize is not None else self.do_resize do_rescale = do_rescale if do_rescale is not None else self.do_rescale do_normalize = do_normalize if do_normalize is not None else self.do_normalize resample = resample if resample is not None else self.resample rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size_dict = get_size_dict(size) if not is_batched(images): images = [images] if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_resize: images = [self.resize(image=image, size=size_dict, resample=resample) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images] images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/groupvit/modeling_groupvit.py
# coding=utf-8 # Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch GroupViT model.""" import collections.abc import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc" GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "nvidia/groupvit-gcc-yfcc", # See all GroupViT models at https://huggingface.co/models?filter=groupvit ] # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/GroupViT.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->groupvit def groupvit_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 def hard_softmax(logits: torch.Tensor, dim: int): y_soft = logits.softmax(dim) # Straight through. index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft return ret def gumbel_softmax(logits: torch.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> torch.Tensor: # more stable https://github.com/pytorch/pytorch/issues/41663 gumbel_dist = torch.distributions.gumbel.Gumbel( torch.tensor(0.0, device=logits.device, dtype=logits.dtype), torch.tensor(1.0, device=logits.device, dtype=logits.dtype), ) gumbels = gumbel_dist.sample(logits.shape) gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau) y_soft = gumbels.softmax(dim) if hard: # Straight through. 
index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft else: # Reparametrization trick. ret = y_soft return ret def resize_attention_map(attentions, height, width, align_corners=False): """ Args: attentions (`torch.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width] height (`int`): height of the output attention map width (`int`): width of the output attention map align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`. Returns: `torch.Tensor`: resized attention map of shape [batch_size, groups, height, width] """ scale = (height * width // attentions.shape[2]) ** 0.5 if height > width: feat_width = int(np.round(width / scale)) feat_height = attentions.shape[2] // feat_width else: feat_height = int(np.round(height / scale)) feat_width = attentions.shape[2] // feat_height batch_size = attentions.shape[0] groups = attentions.shape[1] # number of group token # [batch_size, groups, height*width, groups] -> [batch_size, groups, height, width] attentions = attentions.reshape(batch_size, groups, feat_height, feat_width) attentions = nn.functional.interpolate( attentions, size=(height, width), mode="bilinear", align_corners=align_corners ) return attentions def get_grouping_from_attentions(attentions, hw_shape): """ Args: attentions (`tuple(torch.FloatTensor)`: tuple of attention maps returned by `GroupViTVisionTransformer` hw_shape (`tuple(int)`): height and width of the output attention map Returns: `torch.Tensor`: the attention map of shape [batch_size, groups, height, width] """ attn_maps = [] with torch.no_grad(): prev_attn_masks = None for attn_masks in attentions: # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups] attn_masks = attn_masks.permute(0, 2, 1).contiguous() if prev_attn_masks is None: prev_attn_masks = attn_masks else: prev_attn_masks = prev_attn_masks @ attn_masks # [batch_size, heightxwidth, num_groups] -> [batch_size, num_groups, heightxwidth] -> [batch_size, num_groups, height, width] cur_attn_map = resize_attention_map(prev_attn_masks.permute(0, 2, 1).contiguous(), *hw_shape) attn_maps.append(cur_attn_map) # [batch_size, num_groups, height, width] final_grouping = attn_maps[-1] return final_grouping class GroupViTCrossAttentionLayer(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.attn = GroupViTAttention(config) self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = GroupViTMLP(config) self.norm_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, query, key): x = query x = x + self.attn(query, encoder_hidden_states=key)[0] x = x + self.mlp(self.norm2(x)) x = self.norm_post(x) return x class GroupViTAssignAttention(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.scale = config.hidden_size**-0.5 self.q_proj = nn.Linear(config.hidden_size, config.hidden_size) self.k_proj = nn.Linear(config.hidden_size, config.hidden_size) self.v_proj = nn.Linear(config.hidden_size, config.hidden_size) self.proj = nn.Linear(config.hidden_size, config.hidden_size) self.assign_eps = config.assign_eps def get_attn(self, attn, gumbel=True, hard=True): if gumbel and self.training: attn = gumbel_softmax(attn, dim=-2, hard=hard) else: if hard: attn = hard_softmax(attn, dim=-2) else: attn = nn.functional.softmax(attn, dim=-2) return attn 
def forward(self, query, key): value = key # [batch_size, query_length, channels] query = self.q_proj(query) # [batch_size, key_length, channels] key = self.k_proj(key) # [batch_size, key_length, channels] value = self.v_proj(value) # [batch_size, query_length, key_length] raw_attn = (query @ key.transpose(-2, -1)) * self.scale attn = self.get_attn(raw_attn) soft_attn = self.get_attn(raw_attn, gumbel=False, hard=False) attn = attn / (attn.sum(dim=-1, keepdim=True) + self.assign_eps) out = attn @ value out = self.proj(out) return out, soft_attn class GroupViTTokenAssign(nn.Module): def __init__(self, config: GroupViTVisionConfig, num_group_token, num_output_group): super().__init__() self.num_output_group = num_output_group # norm on group_tokens self.norm_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) assign_mlp_ratio = ( config.assign_mlp_ratio if isinstance(config.assign_mlp_ratio, collections.abc.Iterable) else (config.assign_mlp_ratio, config.assign_mlp_ratio) ) tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio] self.mlp_inter = GroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group) self.norm_post_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # norm on x self.norm_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pre_assign_attn = GroupViTCrossAttentionLayer(config) self.assign = GroupViTAssignAttention(config) self.norm_new_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp_channels = GroupViTMLP(config, config.hidden_size, channels_dim, config.hidden_size) def project_group_token(self, group_tokens): """ Args: group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels] Returns: projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels] """ # [B, num_output_groups, C] <- [B, num_group_tokens, C] projected_group_tokens = self.mlp_inter(group_tokens) projected_group_tokens = self.norm_post_tokens(projected_group_tokens) return projected_group_tokens def forward(self, image_tokens, group_tokens): """ Args: image_tokens (`torch.Tensor`): image tokens, of shape [batch_size, input_length, channels] group_tokens (`torch.Tensor`): group tokens, [batch_size, num_group_tokens, channels] """ group_tokens = self.norm_tokens(group_tokens) image_tokens = self.norm_x(image_tokens) # [batch_size, num_output_groups, channels] projected_group_tokens = self.project_group_token(group_tokens) projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens) new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens) new_image_tokens += projected_group_tokens new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens)) return new_image_tokens, attention @dataclass class GroupViTModelOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. 
segmentation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): Classification scores for each pixel. <Tip warning={true}> The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed. </Tip> text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`GroupViTTextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`GroupViTVisionModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`GroupViTTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`GroupViTVisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None segmentation_logits: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) class GroupViTPatchEmbeddings(nn.Module): """ Image to Patch Embedding. """ def __init__( self, image_size: int = 224, patch_size: Union[int, Tuple[int, int]] = 16, num_channels: int = 3, embed_dim: int = 768, ): super().__init__() image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) x = self.projection(pixel_values).flatten(2).transpose(1, 2) return x class GroupViTVisionEmbeddings(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.patch_embeddings = GroupViTPatchEmbeddings( image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.hidden_size, ) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches, config.hidden_size)) self.dropout = nn.Dropout(config.dropout) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. 
Source: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ npatch = embeddings.shape[1] if npatch == self.position_embeddings.shape[1] and height == width: return self.position_embeddings patch_pos_embed = self.position_embeddings num_original_pos_embed = patch_pos_embed.shape[1] dim = embeddings.shape[-1] feat_height = height // self.config.patch_size feat_width = width // self.config.patch_size # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 feat_height, feat_width = feat_height + 0.1, feat_width + 0.1 original_height = original_width = math.sqrt(num_original_pos_embed) reshaped_patch_pos_embed = patch_pos_embed.reshape(1, int(original_height), int(original_width), dim).permute( 0, 3, 1, 2 ) scale_factor = (feat_height / original_height, feat_width / original_width) patch_pos_embed = nn.functional.interpolate( reshaped_patch_pos_embed, scale_factor=scale_factor, mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return patch_pos_embed def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) embeddings = self.layernorm(embeddings) batch_size, seq_len, _ = embeddings.size() # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->GroupViT class GroupViTTextEmbeddings(nn.Module): def __init__(self, config: GroupViTTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class GroupViTStage(nn.Module): """This corresponds to the `GroupingLayer` class in the GroupViT implementation.""" def __init__( self, config: GroupViTVisionConfig, depth: int, num_prev_group_token: int, num_group_token: int, num_output_group: int, ): super().__init__() self.depth = depth self.num_group_token = num_group_token if num_group_token > 0: self.group_token = nn.Parameter(torch.zeros(1, num_group_token, config.hidden_size)) else: self.group_token = None self.gradient_checkpointing = False self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(depth)]) if num_group_token > 0: self.downsample = GroupViTTokenAssign( 
config=config, num_group_token=num_group_token, num_output_group=num_output_group, ) else: self.downsample = None if num_prev_group_token > 0 and num_group_token > 0: self.group_projector = nn.Sequential( nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps), GroupViTMixerMLP(config, num_prev_group_token, config.hidden_size // 2, num_group_token), ) else: self.group_projector = None @property def with_group_token(self): return self.group_token is not None def split_x(self, x): if self.with_group_token: return x[:, : -self.num_group_token], x[:, -self.num_group_token :] else: return x, None def concat_x(self, x: torch.Tensor, group_token: Optional[torch.Tensor] = None) -> torch.Tensor: if group_token is None: return x return torch.cat([x, group_token], dim=1) def forward( self, hidden_states: torch.Tensor, prev_group_token: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the grouping tensors of Grouping block. """ if self.with_group_token: group_token = self.group_token.expand(hidden_states.size(0), -1, -1) if self.group_projector is not None: group_token = group_token + self.group_projector(prev_group_token) else: group_token = None x = hidden_states cat_x = self.concat_x(x, group_token) for layer in self.layers: layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None) cat_x = layer_out[0] x, group_token = self.split_x(cat_x) attention = None if self.downsample is not None: x, attention = self.downsample(x, group_token) outputs = (x, group_token) if output_attentions: outputs = outputs + (attention,) return outputs class GroupViTMLP(nn.Module): def __init__( self, config: GroupViTVisionConfig, hidden_size: Optional[int] = None, intermediate_size: Optional[int] = None, output_size: Optional[int] = None, ): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] hidden_size = hidden_size if hidden_size is not None else config.hidden_size intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size output_size = output_size if output_size is not None else hidden_size self.fc1 = nn.Linear(hidden_size, intermediate_size) self.fc2 = nn.Linear(intermediate_size, output_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class GroupViTMixerMLP(GroupViTMLP): def forward(self, x): x = super().forward(x.transpose(1, 2)) return x.transpose(1, 2) class GroupViTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()
        is_cross_attention = encoder_hidden_states is not None

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scale
        if is_cross_attention:
            key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz)
        else:
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # apply the causal_attention_mask first
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped


# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->GroupViT
class GroupViTEncoderLayer(nn.Module):
    def __init__(self, config: GroupViTConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = GroupViTAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim)
        self.mlp = GroupViTMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class GroupViTPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
""" config_class = GroupViTConfig base_model_prefix = "groupvit" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" init_range = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=init_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) factor = self.config.initializer_factor if isinstance(module, GroupViTTextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, GroupViTAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, GroupViTMLP): factor = self.config.initializer_factor in_proj_std = ( (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor ) fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (GroupViTTextEncoder, GroupViTVisionEncoder)): module.gradient_checkpointing = value GROUPVIT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GROUPVIT_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ GROUPVIT_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`CLIPFeatureExtractor`]. See [`CLIPFeatureExtractor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ GROUPVIT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`CLIPFeatureExtractor`]. See [`CLIPFeatureExtractor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" class GroupViTVisionEncoder(nn.Module): def __init__(self, config: GroupViTVisionConfig) -> None: super().__init__() self.config = config self.stages = nn.ModuleList( [ GroupViTStage( config=config, depth=config.depths[i], num_group_token=config.num_group_tokens[i], num_output_group=config.num_output_groups[i], num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0, ) for i in range(len(config.depths)) ] ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict all_hidden_states = () if output_hidden_states else None all_groupings = () if output_attentions else None group_tokens = None for i, stage in enumerate(self.stages): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = stage(hidden_states, group_tokens, output_attentions) hidden_states = layer_outputs[0] group_tokens = layer_outputs[1] if output_attentions and layer_outputs[2] is not None: all_groupings = all_groupings + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings ) class GroupViTTextEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a [`GroupViTEncoderLayer`]. Args: config: GroupViTTextConfig """ def __init__(self, config: GroupViTTextConfig): super().__init__() self.config = config self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(encoder_layer),
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


# Copied from transformers.models.clip.modeling_clip.CLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder, CLIP_TEXT->GROUPVIT_TEXT
class GroupViTTextTransformer(nn.Module):
    def __init__(self, config: GroupViTTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = GroupViTTextEmbeddings(config)
        self.encoder = GroupViTTextEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim)

    @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        bsz, seq_len = input_shape
        # CLIP's text model uses causal mask, prepare it here.
# https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 causal_attention_mask = self._build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to( hidden_states.device ) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) # text_embeds.shape = [batch_size, sequence_length, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 pooled_output = last_hidden_state[ torch.arange(last_hidden_state.shape[0], device=input_ids.device), input_ids.to(torch.int).argmax(dim=-1) ] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def _build_causal_attention_mask(self, bsz, seq_len, dtype): # lazily create causal attention mask, with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype) mask.fill_(torch.tensor(torch.finfo(dtype).min)) mask.triu_(1) # zero out the lower diagonal mask = mask.unsqueeze(1) # expand mask return mask class GroupViTTextModel(GroupViTPreTrainedModel): config_class = GroupViTTextConfig def __init__(self, config: GroupViTTextConfig): super().__init__(config) self.text_model = GroupViTTextTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import CLIPTokenizer, GroupViTTextModel >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> model = GroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" return self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class GroupViTVisionTransformer(nn.Module): def __init__(self, config: 
GroupViTVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = GroupViTVisionEmbeddings(config) self.encoder = GroupViTVisionEncoder(config) self.layernorm = nn.LayerNorm(embed_dim) @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) encoder_outputs = self.encoder( hidden_states=hidden_states, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] # normalize the last hidden state last_hidden_state = self.layernorm(last_hidden_state) pooled_output = last_hidden_state.mean(dim=1) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class GroupViTVisionModel(GroupViTPreTrainedModel): config_class = GroupViTVisionConfig main_input_name = "pixel_values" def __init__(self, config: GroupViTVisionConfig): super().__init__(config) self.vision_model = GroupViTVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> GroupViTPatchEmbeddings: return self.vision_model.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, GroupViTVisionModel >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> model = GroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) @add_start_docstrings(GROUPVIT_START_DOCSTRING) class GroupViTModel(GroupViTPreTrainedModel): config_class = GroupViTConfig def __init__(self, config: GroupViTConfig): super().__init__(config) if not 
isinstance(config.text_config, GroupViTTextConfig): raise ValueError( "config.text_config is expected to be of type GroupViTTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, GroupViTVisionConfig): raise ValueError( "config.vision_config is expected to be of type GroupViTVisionConfig but is of type" f" {type(config.vision_config)}." ) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.projection_intermediate_dim = config.projection_intermediate_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = GroupViTTextTransformer(text_config) self.vision_model = GroupViTVisionTransformer(vision_config) self.visual_projection = nn.Sequential( nn.Linear(self.vision_embed_dim, self.projection_intermediate_dim, bias=True), nn.BatchNorm1d(self.projection_intermediate_dim), nn.ReLU(inplace=True), nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True), ) self.text_projection = nn.Sequential( nn.Linear(self.text_embed_dim, self.projection_intermediate_dim, bias=True), nn.BatchNorm1d(self.projection_intermediate_dim), nn.ReLU(inplace=True), nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True), ) self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`GroupViTTextModel`]. Examples: ```python >>> from transformers import CLIPTokenizer, GroupViTModel >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`GroupViTVisionModel`]. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, GroupViTModel >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""" # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=GroupViTModelOutput, config_class=GroupViTConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_segmentation: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, GroupViTModelOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, GroupViTModel >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... 
) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_segmentation = ( output_segmentation if output_segmentation is not None else self.config.output_segmentation ) if output_segmentation: output_attentions = True output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() seg_logits = None if output_segmentation: # grouped features # [batch_size_image, num_group, hidden_size] image_group_embeds = vision_outputs[0] # [batch_size_image*num_group, hidden_size] image_group_embeds = self.visual_projection(image_group_embeds.reshape(-1, image_group_embeds.shape[-1])) if output_hidden_states: attentions = vision_outputs[3] else: attentions = vision_outputs[2] # [batch_size_image, num_group, height, width] grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:]) # normalized features image_group_embeds = image_group_embeds / image_group_embeds.norm(dim=-1, keepdim=True) # [batch_size_image x num_group, batch_size_text] logits_per_image_group = torch.matmul(image_group_embeds, text_embeds.t()) * logit_scale # [batch_size_image, batch_size_text, num_group] logits_per_image_group = logits_per_image_group.reshape( image_embeds.shape[0], -1, text_embeds.shape[0] ).permute(0, 2, 1) # [batch_size_image, batch_size_text, height x width] flatten_grouping = grouping.reshape(grouping.shape[0], grouping.shape[1], -1) # [batch_size_image, batch_size_text, height, width] seg_logits = torch.matmul(logits_per_image_group, flatten_grouping) * logit_scale seg_logits = seg_logits.reshape( seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3] ) loss = None if return_loss: loss = groupvit_loss(logits_per_text) if not return_dict: if seg_logits is not None: output = ( logits_per_image, logits_per_text, seg_logits, text_embeds, image_embeds, text_outputs, vision_outputs, ) else: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return GroupViTModelOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, 
segmentation_logits=seg_logits, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, )
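The `output_segmentation` branch of `GroupViTModel.forward` above turns the grouping attentions into per-prompt segmentation logits. A minimal end-to-end sketch of that path follows; it is an editor's illustration rather than part of the original file, and it assumes the `nvidia/groupvit-gcc-yfcc` checkpoint referenced in the docstrings.

# Zero-shot segmentation sketch: the text prompts double as the segmentation classes.
from PIL import Image
import requests
import torch
from transformers import AutoProcessor, GroupViTModel

processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
)

with torch.no_grad():
    outputs = model(**inputs, output_segmentation=True)

# segmentation_logits has shape (batch_size, num_texts, logits_height, logits_width) at the
# preprocessed resolution; upsample to the original image size before taking an argmax over
# the text dimension to get a per-pixel label map.
seg_logits = outputs.segmentation_logits
upsampled = torch.nn.functional.interpolate(
    seg_logits, size=image.size[::-1], mode="bilinear", align_corners=False
)
label_map = upsampled.argmax(dim=1)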
# coding=utf-8 # Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch GroupViT model.""" import collections.abc import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc" GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "nvidia/groupvit-gcc-yfcc", # See all GroupViT models at https://huggingface.co/models?filter=groupvit ] # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/GroupViT.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->groupvit def groupvit_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 def hard_softmax(logits: torch.Tensor, dim: int): y_soft = logits.softmax(dim) # Straight through. index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft return ret def gumbel_softmax(logits: torch.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> torch.Tensor: # more stable https://github.com/pytorch/pytorch/issues/41663 gumbel_dist = torch.distributions.gumbel.Gumbel( torch.tensor(0.0, device=logits.device, dtype=logits.dtype), torch.tensor(1.0, device=logits.device, dtype=logits.dtype), ) gumbels = gumbel_dist.sample(logits.shape) gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau) y_soft = gumbels.softmax(dim) if hard: # Straight through. 
index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft else: # Reparametrization trick. ret = y_soft return ret def resize_attention_map(attentions, height, width, align_corners=False): """ Args: attentions (`torch.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width] height (`int`): height of the output attention map width (`int`): width of the output attention map align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`. Returns: `torch.Tensor`: resized attention map of shape [batch_size, groups, height, width] """ scale = (height * width // attentions.shape[2]) ** 0.5 if height > width: feat_width = int(np.round(width / scale)) feat_height = attentions.shape[2] // feat_width else: feat_height = int(np.round(height / scale)) feat_width = attentions.shape[2] // feat_height batch_size = attentions.shape[0] groups = attentions.shape[1] # number of group token # [batch_size, groups, height*width, groups] -> [batch_size, groups, height, width] attentions = attentions.reshape(batch_size, groups, feat_height, feat_width) attentions = nn.functional.interpolate( attentions, size=(height, width), mode="bilinear", align_corners=align_corners ) return attentions def get_grouping_from_attentions(attentions, hw_shape): """ Args: attentions (`tuple(torch.FloatTensor)`: tuple of attention maps returned by `GroupViTVisionTransformer` hw_shape (`tuple(int)`): height and width of the output attention map Returns: `torch.Tensor`: the attention map of shape [batch_size, groups, height, width] """ attn_maps = [] with torch.no_grad(): prev_attn_masks = None for attn_masks in attentions: # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups] attn_masks = attn_masks.permute(0, 2, 1).contiguous() if prev_attn_masks is None: prev_attn_masks = attn_masks else: prev_attn_masks = prev_attn_masks @ attn_masks # [batch_size, heightxwidth, num_groups] -> [batch_size, num_groups, heightxwidth] -> [batch_size, num_groups, height, width] cur_attn_map = resize_attention_map(prev_attn_masks.permute(0, 2, 1).contiguous(), *hw_shape) attn_maps.append(cur_attn_map) # [batch_size, num_groups, height, width] final_grouping = attn_maps[-1] return final_grouping class GroupViTCrossAttentionLayer(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.attn = GroupViTAttention(config) self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = GroupViTMLP(config) self.norm_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, query, key): x = query x = x + self.attn(query, encoder_hidden_states=key)[0] x = x + self.mlp(self.norm2(x)) x = self.norm_post(x) return x class GroupViTAssignAttention(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.scale = config.hidden_size**-0.5 self.q_proj = nn.Linear(config.hidden_size, config.hidden_size) self.k_proj = nn.Linear(config.hidden_size, config.hidden_size) self.v_proj = nn.Linear(config.hidden_size, config.hidden_size) self.proj = nn.Linear(config.hidden_size, config.hidden_size) self.assign_eps = config.assign_eps def get_attn(self, attn, gumbel=True, hard=True): if gumbel and self.training: attn = gumbel_softmax(attn, dim=-2, hard=hard) else: if hard: attn = hard_softmax(attn, dim=-2) else: attn = nn.functional.softmax(attn, dim=-2) return attn 
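    # Assignment step: `query` holds the (projected) group tokens and `key`/`value` the image tokens.
    # The softmax in `get_attn` runs over dim=-2 (the group axis), so every image token is softly
    # assigned across groups; `hard_softmax`/`gumbel_softmax` make that assignment (near) one-hot,
    # while the straight-through trick keeps the operation differentiable during training.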
def forward(self, query, key): value = key # [batch_size, query_length, channels] query = self.q_proj(query) # [batch_size, key_length, channels] key = self.k_proj(key) # [batch_size, key_length, channels] value = self.v_proj(value) # [batch_size, query_length, key_length] raw_attn = (query @ key.transpose(-2, -1)) * self.scale attn = self.get_attn(raw_attn) soft_attn = self.get_attn(raw_attn, gumbel=False, hard=False) attn = attn / (attn.sum(dim=-1, keepdim=True) + self.assign_eps) out = attn @ value out = self.proj(out) return out, soft_attn class GroupViTTokenAssign(nn.Module): def __init__(self, config: GroupViTVisionConfig, num_group_token, num_output_group): super().__init__() self.num_output_group = num_output_group # norm on group_tokens self.norm_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) assign_mlp_ratio = ( config.assign_mlp_ratio if isinstance(config.assign_mlp_ratio, collections.abc.Iterable) else (config.assign_mlp_ratio, config.assign_mlp_ratio) ) tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio] self.mlp_inter = GroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group) self.norm_post_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # norm on x self.norm_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pre_assign_attn = GroupViTCrossAttentionLayer(config) self.assign = GroupViTAssignAttention(config) self.norm_new_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp_channels = GroupViTMLP(config, config.hidden_size, channels_dim, config.hidden_size) def project_group_token(self, group_tokens): """ Args: group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels] Returns: projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels] """ # [B, num_output_groups, C] <- [B, num_group_tokens, C] projected_group_tokens = self.mlp_inter(group_tokens) projected_group_tokens = self.norm_post_tokens(projected_group_tokens) return projected_group_tokens def forward(self, image_tokens, group_tokens): """ Args: image_tokens (`torch.Tensor`): image tokens, of shape [batch_size, input_length, channels] group_tokens (`torch.Tensor`): group tokens, [batch_size, num_group_tokens, channels] """ group_tokens = self.norm_tokens(group_tokens) image_tokens = self.norm_x(image_tokens) # [batch_size, num_output_groups, channels] projected_group_tokens = self.project_group_token(group_tokens) projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens) new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens) new_image_tokens += projected_group_tokens new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens)) return new_image_tokens, attention @dataclass class GroupViTModelOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. 
segmentation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): Classification scores for each pixel. <Tip warning={true}> The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed. </Tip> text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`GroupViTTextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`GroupViTVisionModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`GroupViTTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`GroupViTVisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None segmentation_logits: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) class GroupViTPatchEmbeddings(nn.Module): """ Image to Patch Embedding. """ def __init__( self, image_size: int = 224, patch_size: Union[int, Tuple[int, int]] = 16, num_channels: int = 3, embed_dim: int = 768, ): super().__init__() image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) x = self.projection(pixel_values).flatten(2).transpose(1, 2) return x class GroupViTVisionEmbeddings(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.patch_embeddings = GroupViTPatchEmbeddings( image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.hidden_size, ) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches, config.hidden_size)) self.dropout = nn.Dropout(config.dropout) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. 
Source: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ npatch = embeddings.shape[1] if npatch == self.position_embeddings.shape[1] and height == width: return self.position_embeddings patch_pos_embed = self.position_embeddings num_original_pos_embed = patch_pos_embed.shape[1] dim = embeddings.shape[-1] feat_height = height // self.config.patch_size feat_width = width // self.config.patch_size # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 feat_height, feat_width = feat_height + 0.1, feat_width + 0.1 original_height = original_width = math.sqrt(num_original_pos_embed) reshaped_patch_pos_embed = patch_pos_embed.reshape(1, int(original_height), int(original_width), dim).permute( 0, 3, 1, 2 ) scale_factor = (feat_height / original_height, feat_width / original_width) patch_pos_embed = nn.functional.interpolate( reshaped_patch_pos_embed, scale_factor=scale_factor, mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return patch_pos_embed def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) embeddings = self.layernorm(embeddings) batch_size, seq_len, _ = embeddings.size() # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->GroupViT class GroupViTTextEmbeddings(nn.Module): def __init__(self, config: GroupViTTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class GroupViTStage(nn.Module): """This corresponds to the `GroupingLayer` class in the GroupViT implementation.""" def __init__( self, config: GroupViTVisionConfig, depth: int, num_prev_group_token: int, num_group_token: int, num_output_group: int, ): super().__init__() self.depth = depth self.num_group_token = num_group_token if num_group_token > 0: self.group_token = nn.Parameter(torch.zeros(1, num_group_token, config.hidden_size)) else: self.group_token = None self.gradient_checkpointing = False self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(depth)]) if num_group_token > 0: self.downsample = GroupViTTokenAssign( 
config=config, num_group_token=num_group_token, num_output_group=num_output_group, ) else: self.downsample = None if num_prev_group_token > 0 and num_group_token > 0: self.group_projector = nn.Sequential( nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps), GroupViTMixerMLP(config, num_prev_group_token, config.hidden_size // 2, num_group_token), ) else: self.group_projector = None @property def with_group_token(self): return self.group_token is not None def split_x(self, x): if self.with_group_token: return x[:, : -self.num_group_token], x[:, -self.num_group_token :] else: return x, None def concat_x(self, x: torch.Tensor, group_token: Optional[torch.Tensor] = None) -> torch.Tensor: if group_token is None: return x return torch.cat([x, group_token], dim=1) def forward( self, hidden_states: torch.Tensor, prev_group_token: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the grouping tensors of Grouping block. """ if self.with_group_token: group_token = self.group_token.expand(hidden_states.size(0), -1, -1) if self.group_projector is not None: group_token = group_token + self.group_projector(prev_group_token) else: group_token = None x = hidden_states cat_x = self.concat_x(x, group_token) for layer in self.layers: layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None) cat_x = layer_out[0] x, group_token = self.split_x(cat_x) attention = None if self.downsample is not None: x, attention = self.downsample(x, group_token) outputs = (x, group_token) if output_attentions: outputs = outputs + (attention,) return outputs class GroupViTMLP(nn.Module): def __init__( self, config: GroupViTVisionConfig, hidden_size: Optional[int] = None, intermediate_size: Optional[int] = None, output_size: Optional[int] = None, ): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] hidden_size = hidden_size if hidden_size is not None else config.hidden_size intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size output_size = output_size if output_size is not None else hidden_size self.fc1 = nn.Linear(hidden_size, intermediate_size) self.fc2 = nn.Linear(intermediate_size, output_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class GroupViTMixerMLP(GroupViTMLP): def forward(self, x): x = super().forward(x.transpose(1, 2)) return x.transpose(1, 2) class GroupViTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() is_cross_attention = encoder_hidden_states is not None # get query proj query_states = self.q_proj(hidden_states) * self.scale if is_cross_attention: key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz) value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) # apply the causal_attention_mask first if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {causal_attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->GroupViT class GroupViTEncoderLayer(nn.Module): def __init__(self, config: GroupViTConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = GroupViTAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim) self.mlp = GroupViTMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class GroupViTPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = GroupViTConfig base_model_prefix = "groupvit" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" init_range = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=init_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) factor = self.config.initializer_factor if isinstance(module, GroupViTTextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, GroupViTAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, GroupViTMLP): factor = self.config.initializer_factor in_proj_std = ( (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor ) fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (GroupViTTextEncoder, GroupViTVisionEncoder)): module.gradient_checkpointing = value GROUPVIT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GROUPVIT_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ GROUPVIT_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`CLIPFeatureExtractor`]. See [`CLIPFeatureExtractor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ GROUPVIT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`CLIPFeatureExtractor`]. See [`CLIPFeatureExtractor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" class GroupViTVisionEncoder(nn.Module): def __init__(self, config: GroupViTVisionConfig) -> None: super().__init__() self.config = config self.stages = nn.ModuleList( [ GroupViTStage( config=config, depth=config.depths[i], num_group_token=config.num_group_tokens[i], num_output_group=config.num_output_groups[i], num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0, ) for i in range(len(config.depths)) ] ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict all_hidden_states = () if output_hidden_states else None all_groupings = () if output_attentions else None group_tokens = None for i, stage in enumerate(self.stages): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = stage(hidden_states, group_tokens, output_attentions) hidden_states = layer_outputs[0] group_tokens = layer_outputs[1] if output_attentions and layer_outputs[2] is not None: all_groupings = all_groupings + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings ) class GroupViTTextEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a [`GroupViTEncoderLayer`]. Args: config: GroupViTTextConfig """ def __init__(self, config: GroupViTTextConfig): super().__init__() self.config = config self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, causal_attention_mask, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Copied from transformers.models.clip.modeling_clip.CLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder, CLIP_TEXT->GROUPVIT_TEXT class GroupViTTextTransformer(nn.Module): def __init__(self, config: GroupViTTextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = GroupViTTextEmbeddings(config) self.encoder = GroupViTTextEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim) @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError("You have to specify either input_ids") input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) bsz, seq_len = input_shape # CLIP's text model uses causal mask, prepare it here. 
# https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 causal_attention_mask = self._build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to( hidden_states.device ) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) # text_embeds.shape = [batch_size, sequence_length, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 pooled_output = last_hidden_state[ torch.arange(last_hidden_state.shape[0], device=input_ids.device), input_ids.to(torch.int).argmax(dim=-1) ] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def _build_causal_attention_mask(self, bsz, seq_len, dtype): # lazily create causal attention mask, with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype) mask.fill_(torch.tensor(torch.finfo(dtype).min)) mask.triu_(1) # zero out the lower diagonal mask = mask.unsqueeze(1) # expand mask return mask class GroupViTTextModel(GroupViTPreTrainedModel): config_class = GroupViTTextConfig def __init__(self, config: GroupViTTextConfig): super().__init__(config) self.text_model = GroupViTTextTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import CLIPTokenizer, GroupViTTextModel >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> model = GroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" return self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class GroupViTVisionTransformer(nn.Module): def __init__(self, config: 
GroupViTVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = GroupViTVisionEmbeddings(config) self.encoder = GroupViTVisionEncoder(config) self.layernorm = nn.LayerNorm(embed_dim) @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) encoder_outputs = self.encoder( hidden_states=hidden_states, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] # normalize the last hidden state last_hidden_state = self.layernorm(last_hidden_state) pooled_output = last_hidden_state.mean(dim=1) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class GroupViTVisionModel(GroupViTPreTrainedModel): config_class = GroupViTVisionConfig main_input_name = "pixel_values" def __init__(self, config: GroupViTVisionConfig): super().__init__(config) self.vision_model = GroupViTVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> GroupViTPatchEmbeddings: return self.vision_model.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, GroupViTVisionModel >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> model = GroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) @add_start_docstrings(GROUPVIT_START_DOCSTRING) class GroupViTModel(GroupViTPreTrainedModel): config_class = GroupViTConfig def __init__(self, config: GroupViTConfig): super().__init__(config) if not 
isinstance(config.text_config, GroupViTTextConfig): raise ValueError( "config.text_config is expected to be of type GroupViTTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, GroupViTVisionConfig): raise ValueError( "config.vision_config is expected to be of type GroupViTVisionConfig but is of type" f" {type(config.vision_config)}." ) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.projection_intermediate_dim = config.projection_intermediate_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = GroupViTTextTransformer(text_config) self.vision_model = GroupViTVisionTransformer(vision_config) self.visual_projection = nn.Sequential( nn.Linear(self.vision_embed_dim, self.projection_intermediate_dim, bias=True), nn.BatchNorm1d(self.projection_intermediate_dim), nn.ReLU(inplace=True), nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True), ) self.text_projection = nn.Sequential( nn.Linear(self.text_embed_dim, self.projection_intermediate_dim, bias=True), nn.BatchNorm1d(self.projection_intermediate_dim), nn.ReLU(inplace=True), nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True), ) self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`GroupViTTextModel`]. Examples: ```python >>> from transformers import CLIPTokenizer, GroupViTModel >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`GroupViTVisionModel`]. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, GroupViTModel >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""" # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=GroupViTModelOutput, config_class=GroupViTConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_segmentation: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, GroupViTModelOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, GroupViTModel >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... 
) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_segmentation = ( output_segmentation if output_segmentation is not None else self.config.output_segmentation ) if output_segmentation: output_attentions = True output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() seg_logits = None if output_segmentation: # grouped features # [batch_size_image, num_group, hidden_size] image_group_embeds = vision_outputs[0] # [batch_size_image*num_group, hidden_size] image_group_embeds = self.visual_projection(image_group_embeds.reshape(-1, image_group_embeds.shape[-1])) if output_hidden_states: attentions = vision_outputs[3] else: attentions = vision_outputs[2] # [batch_size_image, num_group, height, width] grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:]) # normalized features image_group_embeds = image_group_embeds / image_group_embeds.norm(dim=-1, keepdim=True) # [batch_size_image x num_group, batch_size_text] logits_per_image_group = torch.matmul(image_group_embeds, text_embeds.t()) * logit_scale # [batch_size_image, batch_size_text, num_group] logits_per_image_group = logits_per_image_group.reshape( image_embeds.shape[0], -1, text_embeds.shape[0] ).permute(0, 2, 1) # [batch_size_image, batch_size_text, height x width] flatten_grouping = grouping.reshape(grouping.shape[0], grouping.shape[1], -1) # [batch_size_image, batch_size_text, height, width] seg_logits = torch.matmul(logits_per_image_group, flatten_grouping) * logit_scale seg_logits = seg_logits.reshape( seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3] ) loss = None if return_loss: loss = groupvit_loss(logits_per_text) if not return_dict: if seg_logits is not None: output = ( logits_per_image, logits_per_text, seg_logits, text_embeds, image_embeds, text_outputs, vision_outputs, ) else: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return GroupViTModelOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, 
segmentation_logits=seg_logits, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, )
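A minimal usage sketch (an editor's addition, not part of the original file) for the segmentation branch implemented in `forward` above: with `output_segmentation=True`, `GroupViTModelOutput.segmentation_logits` has shape `(batch_size, num_texts, height, width)` at the resolution of `pixel_values` (see the `get_grouping_from_attentions` call above), and a per-pixel argmax over the text prompts is one plausible way to turn it into a zero-shot segmentation mask. The argmax post-processing is an assumption about downstream use, not something the model itself prescribes.

```python
import requests
import torch
from PIL import Image

from transformers import AutoProcessor, GroupViTModel

processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

texts = ["a photo of a cat", "a photo of a dog"]
inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)

with torch.no_grad():
    outputs = model(**inputs, output_segmentation=True)

# (batch_size, num_texts, height, width), at the spatial resolution of pixel_values
seg_logits = outputs.segmentation_logits

# assumption: assign each pixel to the text prompt with the highest logit
predicted_mask = seg_logits.argmax(dim=1)  # (batch_size, height, width)
```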
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/whisper/modeling_tf_whisper.py
# coding=utf-8 # Copyright 2022 The OpenAI Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorFlow Whisper model.""" import math import random from typing import Dict, Optional, Tuple import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPastAndCrossAttentions, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput, ) from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, keras_serializable, unpack_inputs from ...tf_utils import shape_list, stable_softmax from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_whisper import WhisperConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "WhisperConfig" TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "openai/whisper-base", # See all Whisper models at https://huggingface.co/models?filter=whisper ] LARGE_NEGATIVE = -1e8 # Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) start_tokens = tf.fill( (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) ) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFWhisperPositionalEmbedding(tf.keras.layers.Layer): def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None, **kwargs): super().__init__(**kwargs) self.num_positions = num_positions self.embedding_dim = embedding_dim self.padding_idx = padding_idx def build(self, input_shape): self.weight = self.add_weight( name="weight", shape=[self.num_positions, self.embedding_dim], trainable=True, ) super().build(input_shape) def call(self, input_ids, past_key_values_length=0): past_key_values_length = tf.cast(past_key_values_length, tf.int32) gather_indices = tf.range(tf.shape(input_ids)[-1], delta=1) + past_key_values_length return tf.gather(self.weight, gather_indices) class TFWhisperAttention(tf.keras.layers.Layer): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = tf.keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=False, name="k_proj") self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention._shape with BART->whisper def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention.call with BART->whisper def call( self, hidden_states: tf.Tensor, key_value_states: Optional[tf.Tensor] = None, past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None, attention_mask: Optional[tf.Tensor] = None, layer_head_mask: Optional[tf.Tensor] = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) 
value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value # Copied from transformers.models.speech_to_text.modeling_tf_speech_to_text.TFSpeech2TextEncoderLayer with Speech2Text->Whisper class TFWhisperEncoderLayer(tf.keras.layers.Layer): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFWhisperAttention( self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn" ) self.self_attn_layer_norm = 
tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.dropout = tf.keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1") self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool = False ): """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)` """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training, ) tf.debugging.assert_equal( shape_list(hidden_states), shape_list(residual), message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return hidden_states, self_attn_weights # Copied from transformers.models.speech_to_text.modeling_tf_speech_to_text.TFSpeech2TextDecoderLayer with Speech2Text->Whisper class TFWhisperDecoderLayer(tf.keras.layers.Layer): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFWhisperAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = tf.keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.encoder_attn = TFWhisperAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name="encoder_attn", is_decoder=True, ) self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1") self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") def call( self, hidden_states, attention_mask: Optional[tf.Tensor] = None, encoder_hidden_states: Optional[tf.Tensor] = None, encoder_attention_mask: Optional[tf.Tensor] = None, layer_head_mask: Optional[tf.Tensor] = None, cross_attn_layer_head_mask: Optional[tf.Tensor] = None, past_key_value: Optional[Tuple[tf.Tensor]] = None, training=False, ) -> 
Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`tf.Tensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size `(decoder_attention_heads,)` cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. `(decoder_attention_heads,)` past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, training=training, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return ( hidden_states, self_attn_weights, cross_attn_weights, present_key_value, ) class TFWhisperPreTrainedModel(TFPreTrainedModel): config_class = WhisperConfig base_model_prefix = "model" main_input_name = "input_features" def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor) -> int: """ Computes the output length of the convolutional layers """ input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: """ Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs. 
""" return { self.main_input_name: tf.random.uniform( [2, self.config.num_mel_bins, self.config.max_source_positions * 2 - 1], dtype=tf.float32 ), "decoder_input_ids": tf.constant([[2, 3]], dtype=tf.int64), } @tf.function( input_signature=[ { "input_features": tf.TensorSpec((None, None, None), tf.float32, name="input_features"), "decoder_input_ids": tf.TensorSpec((None, None), tf.int64, name="decoder_input_ids"), "decoder_attention_mask": tf.TensorSpec((None, None), tf.int64, name="decoder_attention_mask"), } ] ) def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) WHISPER_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. Parameters: config ([`WhisperConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ WHISPER_INPUTS_DOCSTRING = r""" Args: input_features (`tf.Tensor` of shape `(batch_size, feature_size, sequence_length)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`WhisperFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a tensor of type `tf.Tensor`. See [`~WhisperFeatureExtractor.__call__`] decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`SpeechToTextTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @keras_serializable class TFWhisperEncoder(tf.keras.layers.Layer): config_class = WhisperConfig """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`TFWhisperEncoderLayer`]. 
Args: config: WhisperConfig embed_tokens (TFWhisperEmbedding): output embedding """ def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layerdrop = config.encoder_layerdrop self.embed_dim = config.d_model self.num_mel_bins = config.num_mel_bins self.padding_idx = config.pad_token_id self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(self.embed_dim) if config.scale_embedding else 1.0 # Padding is added in call() to match the PyTorch implementation self.conv1 = tf.keras.layers.Conv1D(self.embed_dim, kernel_size=3, strides=1, padding="valid", name="conv1") self.conv2 = tf.keras.layers.Conv1D(self.embed_dim, kernel_size=3, strides=2, padding="valid", name="conv2") self.embed_positions = TFWhisperPositionalEmbedding( self.max_source_positions, self.embed_dim, name="embed_positions" ) self.encoder_layers = [TFWhisperEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)] self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") self.dropout = tf.keras.layers.Dropout(config.dropout) @unpack_inputs def call( self, input_features=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Args: input_features (`tf.Tensor` of shape `(batch_size, feature_size, sequence_length)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`WhisperFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a tensor of type `tf.Tensor`. See [`~WhisperFeatureExtractor.__call__`] head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TF 2.0 layers can't use channels first format when running on CPU. 
input_features = tf.transpose(input_features, perm=(0, 2, 1)) input_features = tf.pad(input_features, [[0, 0], [1, 1], [0, 0]]) inputs_embeds = tf.keras.activations.gelu(self.conv1(input_features)) inputs_embeds = tf.pad(inputs_embeds, [[0, 0], [1, 1], [0, 0]]) inputs_embeds = tf.keras.activations.gelu(self.conv2(inputs_embeds)) inputs_embeds = tf.transpose(inputs_embeds, perm=(0, 1, 2)) embed_pos = self.embed_positions(input_ids=tf.zeros((1, self.max_source_positions), dtype=tf.int32)) hidden_states = inputs_embeds + embed_pos hidden_states = self.dropout(hidden_states, training=training) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.encoder_layers), message=( f"The head_mask should be specified for {len(self.encoder_layers)} layers, but it is for" f" {shape_list(head_mask)[0]}." ), ) for idx, encoder_layer in enumerate(self.encoder_layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): # skip the layer continue hidden_states, attn = encoder_layer( hidden_states, None, layer_head_mask=(head_mask[idx] if head_mask is not None else None), training=training, ) if output_attentions: all_attentions += (attn,) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) @keras_serializable class TFWhisperDecoder(tf.keras.layers.Layer): config_class = WhisperConfig """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`TFWhisperDecoderLayer`] Args: config: WhisperConfig """ def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.dropout = tf.keras.layers.Dropout(config.dropout) self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_target_positions self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = tf.keras.layers.Embedding( input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=self.config.init_std), name="embed_tokens", ) self.embed_positions = TFWhisperPositionalEmbedding( self.max_target_positions, config.d_model, name="embed_positions" ) self.decoder_layers = [TFWhisperDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] batch_size, seq_len = input_shape[0], input_shape[1] combined_attention_mask = tf.cond( tf.math.greater(seq_len, 1), lambda: _make_causal_mask(input_shape, past_key_values_length=past_key_values_length), lambda: _expand_mask(tf.ones((batch_size, seq_len + past_key_values_length)), tgt_len=seq_len), ) if attention_mask is not None: attention_mask = tf.cond( tf.greater(tf.shape(attention_mask)[-1], seq_len) & tf.greater(seq_len, 0), lambda: attention_mask[:, : seq_len + past_key_values_length], lambda: attention_mask, ) # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1]) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask @unpack_inputs def call( self, input_ids=None, attention_mask=None, position_ids=None, encoder_hidden_states=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder. head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = tf.shape(input_ids) input_ids = tf.reshape(input_ids, (-1, input_shape[-1])) elif inputs_embeds is not None: input_shape = tf.shape(inputs_embeds)[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
tf.debugging.assert_less( input_ids, tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), message=( "input_ids must be smaller than the embedding layer's input dimension (got" f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" ), ) inputs_embeds = self.embed_tokens(input_ids) attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) # embed positions filled_past_positions = past_key_values_length if position_ids is None else position_ids[0, -1] positions = self.embed_positions(input_ids, past_key_values_length=filled_past_positions) hidden_states = inputs_embeds + positions hidden_states = self.dropout(hidden_states, training=training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.decoder_layers), message=( f"The {attn_mask_name} should be specified for {len(self.decoder_layers)} layers, but it is" f" for {shape_list(attn_mask)[0]}." ), ) for idx, decoder_layer in enumerate(self.decoder_layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), past_key_value=past_key_value, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare Whisper Model outputting raw hidden-states without any specific head on top.", WHISPER_START_DOCSTRING, ) @keras_serializable class TFWhisperMainLayer(tf.keras.layers.Layer): config_class = WhisperConfig def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.encoder = TFWhisperEncoder(config, name="encoder") self.decoder = TFWhisperDecoder(config, name="decoder") def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = 
value def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import TFWhisperModel, WhisperFeatureExtractor >>> from datasets import load_dataset >>> model = TFWhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_features, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput): encoder_outputs = TFBaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return decoder_outputs + encoder_outputs return TFSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The bare Whisper Model outputting raw hidden-states without any 
specific head on top.", WHISPER_START_DOCSTRING, ) class TFWhisperModel(TFWhisperPreTrainedModel): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(config, **kwargs) self.model = TFWhisperMainLayer(config, name="model") def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder def decoder(self): return self.model.decoder def encoder(self): return self.model.encoder @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import TFWhisperModel, WhisperFeatureExtractor >>> from datasets import load_dataset >>> model = TFWhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" outputs = self.model( input_features=input_features, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqModelOutput( last_hidden_state=output.last_hidden_state, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) @add_start_docstrings( "The Whisper Model with a language modeling head. 
Can be used for automatic speech recognition.", WHISPER_START_DOCSTRING, ) class TFWhisperForConditionalGeneration(TFWhisperPreTrainedModel, TFCausalLanguageModelingLoss): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"encoder.version", r"decoder.version", r"proj_out.weight", ] _keys_to_ignore_on_save = [ r"proj_out.weight", ] def __init__(self, config: WhisperConfig, **kwargs): super().__init__(config, **kwargs) self.model = TFWhisperMainLayer(config, name="model") def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def resize_token_embeddings(self, new_num_tokens: int) -> tf.keras.layers.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) return new_embeddings @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import WhisperProcessor, TFWhisperForConditionalGeneration >>> from datasets import load_dataset >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> generated_ids = model.generate(input_ids=input_features) >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> transcription ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' 
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )

        outputs = self.model(
            input_features,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        decoder_last_hidden_state = outputs[0]
        # Decoder and encoder embeddings are tied
        lm_logits = tf.matmul(decoder_last_hidden_state, self.get_output_embeddings().weights, transpose_b=True)

        loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFSeq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def serving_output(self, output):
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None

        return TFSeq2SeqLMOutput(
            logits=output.logits,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            cross_attentions=cross_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
        )

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past=None,
        use_cache=None,
        encoder_outputs=None,
        attention_mask=None,
        decoder_attention_mask=None,
        **kwargs
    ):
        # cut decoder_input_ids if past is used
        if past is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]

        if decoder_attention_mask is not None:  # xla
            decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
        elif past is not None:  # no xla + past
            decoder_position_ids = past[0][0].shape[2]
        else:  # no xla + no past
            decoder_position_ids = tf.range(decoder_input_ids.shape[1])
        decoder_position_ids = tf.broadcast_to(decoder_position_ids, decoder_input_ids.shape)

        return {
            "input_features": None,  # Needs to be passed to make Keras.layer.__call__ happy
            "encoder_outputs": encoder_outputs,
            "past_key_values": past,
            "decoder_input_ids": decoder_input_ids,
            "use_cache": use_cache,
            "decoder_attention_mask": decoder_attention_mask,
            "decoder_position_ids": decoder_position_ids,
        }

    # @staticmethod
    def _reorder_cache(past, beam_idx):
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(tf.gather(past_state, beam_idx) for past_state in layer_past),)
        return reordered_past
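As a quick illustration of the `labels` branch at the top of `TFWhisperForConditionalGeneration.call` above, here is a minimal sketch of calling the module-level `shift_tokens_right` helper directly. It assumes a TensorFlow-enabled install of `transformers` exposing the module path shown in this file, and it uses toy token ids rather than Whisper's real special tokens.

```python
# Minimal sketch with placeholder ids; `shift_tokens_right` is the helper defined
# in this module, imported here assuming the module path matches the file above.
import tensorflow as tf

from transformers.models.whisper.modeling_tf_whisper import shift_tokens_right

pad_token_id = 0  # placeholder value, not Whisper's real pad id
decoder_start_token_id = 1  # placeholder value, not Whisper's real start-of-transcript id

# -100 marks label positions that the loss ignores
labels = tf.constant([[7, 8, 9, -100]], dtype=tf.int64)

decoder_input_ids = shift_tokens_right(labels, pad_token_id, decoder_start_token_id)
# The start token is prepended, the labels are shifted right by one position,
# and any remaining -100 is replaced by the pad id:
# tf.Tensor([[1 7 8 9]], shape=(1, 4), dtype=int64)
print(decoder_input_ids)
```

This mirrors what `call` does when only `labels` are passed: the decoder inputs are built from the shifted labels, so the model learns to predict each label token from the tokens that precede it.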
# coding=utf-8 # Copyright 2022 The OpenAI Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorFlow Whisper model.""" import math import random from typing import Dict, Optional, Tuple import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPastAndCrossAttentions, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput, ) from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, keras_serializable, unpack_inputs from ...tf_utils import shape_list, stable_softmax from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_whisper import WhisperConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "WhisperConfig" TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "openai/whisper-base", # See all Whisper models at https://huggingface.co/models?filter=whisper ] LARGE_NEGATIVE = -1e8 # Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) start_tokens = tf.fill( (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) ) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFWhisperPositionalEmbedding(tf.keras.layers.Layer): def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None, **kwargs): super().__init__(**kwargs) self.num_positions = num_positions self.embedding_dim = embedding_dim self.padding_idx = padding_idx def build(self, input_shape): self.weight = self.add_weight( name="weight", shape=[self.num_positions, self.embedding_dim], trainable=True, ) super().build(input_shape) def call(self, input_ids, past_key_values_length=0): past_key_values_length = tf.cast(past_key_values_length, tf.int32) gather_indices = tf.range(tf.shape(input_ids)[-1], delta=1) + past_key_values_length return tf.gather(self.weight, gather_indices) class TFWhisperAttention(tf.keras.layers.Layer): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = tf.keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=False, name="k_proj") self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention._shape with BART->whisper def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention.call with BART->whisper def call( self, hidden_states: tf.Tensor, key_value_states: Optional[tf.Tensor] = None, past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None, attention_mask: Optional[tf.Tensor] = None, layer_head_mask: Optional[tf.Tensor] = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) 
value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value # Copied from transformers.models.speech_to_text.modeling_tf_speech_to_text.TFSpeech2TextEncoderLayer with Speech2Text->Whisper class TFWhisperEncoderLayer(tf.keras.layers.Layer): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFWhisperAttention( self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn" ) self.self_attn_layer_norm = 
tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.dropout = tf.keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1") self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool = False ): """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)` """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training, ) tf.debugging.assert_equal( shape_list(hidden_states), shape_list(residual), message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return hidden_states, self_attn_weights # Copied from transformers.models.speech_to_text.modeling_tf_speech_to_text.TFSpeech2TextDecoderLayer with Speech2Text->Whisper class TFWhisperDecoderLayer(tf.keras.layers.Layer): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFWhisperAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = tf.keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.encoder_attn = TFWhisperAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name="encoder_attn", is_decoder=True, ) self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1") self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") def call( self, hidden_states, attention_mask: Optional[tf.Tensor] = None, encoder_hidden_states: Optional[tf.Tensor] = None, encoder_attention_mask: Optional[tf.Tensor] = None, layer_head_mask: Optional[tf.Tensor] = None, cross_attn_layer_head_mask: Optional[tf.Tensor] = None, past_key_value: Optional[Tuple[tf.Tensor]] = None, training=False, ) -> 
Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`tf.Tensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size `(decoder_attention_heads,)` cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. `(decoder_attention_heads,)` past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, training=training, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return ( hidden_states, self_attn_weights, cross_attn_weights, present_key_value, ) class TFWhisperPreTrainedModel(TFPreTrainedModel): config_class = WhisperConfig base_model_prefix = "model" main_input_name = "input_features" def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor) -> int: """ Computes the output length of the convolutional layers """ input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: """ Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs. 
""" return { self.main_input_name: tf.random.uniform( [2, self.config.num_mel_bins, self.config.max_source_positions * 2 - 1], dtype=tf.float32 ), "decoder_input_ids": tf.constant([[2, 3]], dtype=tf.int64), } @tf.function( input_signature=[ { "input_features": tf.TensorSpec((None, None, None), tf.float32, name="input_features"), "decoder_input_ids": tf.TensorSpec((None, None), tf.int64, name="decoder_input_ids"), "decoder_attention_mask": tf.TensorSpec((None, None), tf.int64, name="decoder_attention_mask"), } ] ) def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) WHISPER_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. Parameters: config ([`WhisperConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ WHISPER_INPUTS_DOCSTRING = r""" Args: input_features (`tf.Tensor` of shape `(batch_size, feature_size, sequence_length)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`WhisperFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a tensor of type `tf.Tensor`. See [`~WhisperFeatureExtractor.__call__`] decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`SpeechToTextTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @keras_serializable class TFWhisperEncoder(tf.keras.layers.Layer): config_class = WhisperConfig """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`TFWhisperEncoderLayer`]. 
Args: config: WhisperConfig embed_tokens (TFWhisperEmbedding): output embedding """ def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layerdrop = config.encoder_layerdrop self.embed_dim = config.d_model self.num_mel_bins = config.num_mel_bins self.padding_idx = config.pad_token_id self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(self.embed_dim) if config.scale_embedding else 1.0 # Padding is added in call() to match the PyTorch implementation self.conv1 = tf.keras.layers.Conv1D(self.embed_dim, kernel_size=3, strides=1, padding="valid", name="conv1") self.conv2 = tf.keras.layers.Conv1D(self.embed_dim, kernel_size=3, strides=2, padding="valid", name="conv2") self.embed_positions = TFWhisperPositionalEmbedding( self.max_source_positions, self.embed_dim, name="embed_positions" ) self.encoder_layers = [TFWhisperEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)] self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") self.dropout = tf.keras.layers.Dropout(config.dropout) @unpack_inputs def call( self, input_features=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Args: input_features (`tf.Tensor` of shape `(batch_size, feature_size, sequence_length)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`WhisperFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a tensor of type `tf.Tensor`. See [`~WhisperFeatureExtractor.__call__`] head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TF 2.0 layers can't use channels first format when running on CPU. 
input_features = tf.transpose(input_features, perm=(0, 2, 1)) input_features = tf.pad(input_features, [[0, 0], [1, 1], [0, 0]]) inputs_embeds = tf.keras.activations.gelu(self.conv1(input_features)) inputs_embeds = tf.pad(inputs_embeds, [[0, 0], [1, 1], [0, 0]]) inputs_embeds = tf.keras.activations.gelu(self.conv2(inputs_embeds)) inputs_embeds = tf.transpose(inputs_embeds, perm=(0, 1, 2)) embed_pos = self.embed_positions(input_ids=tf.zeros((1, self.max_source_positions), dtype=tf.int32)) hidden_states = inputs_embeds + embed_pos hidden_states = self.dropout(hidden_states, training=training) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.encoder_layers), message=( f"The head_mask should be specified for {len(self.encoder_layers)} layers, but it is for" f" {shape_list(head_mask)[0]}." ), ) for idx, encoder_layer in enumerate(self.encoder_layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): # skip the layer continue hidden_states, attn = encoder_layer( hidden_states, None, layer_head_mask=(head_mask[idx] if head_mask is not None else None), training=training, ) if output_attentions: all_attentions += (attn,) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) @keras_serializable class TFWhisperDecoder(tf.keras.layers.Layer): config_class = WhisperConfig """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`TFWhisperDecoderLayer`] Args: config: WhisperConfig """ def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.dropout = tf.keras.layers.Dropout(config.dropout) self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_target_positions self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = tf.keras.layers.Embedding( input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=self.config.init_std), name="embed_tokens", ) self.embed_positions = TFWhisperPositionalEmbedding( self.max_target_positions, config.d_model, name="embed_positions" ) self.decoder_layers = [TFWhisperDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] batch_size, seq_len = input_shape[0], input_shape[1] combined_attention_mask = tf.cond( tf.math.greater(seq_len, 1), lambda: _make_causal_mask(input_shape, past_key_values_length=past_key_values_length), lambda: _expand_mask(tf.ones((batch_size, seq_len + past_key_values_length)), tgt_len=seq_len), ) if attention_mask is not None: attention_mask = tf.cond( tf.greater(tf.shape(attention_mask)[-1], seq_len) & tf.greater(seq_len, 0), lambda: attention_mask[:, : seq_len + past_key_values_length], lambda: attention_mask, ) # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1]) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask @unpack_inputs def call( self, input_ids=None, attention_mask=None, position_ids=None, encoder_hidden_states=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder. head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = tf.shape(input_ids) input_ids = tf.reshape(input_ids, (-1, input_shape[-1])) elif inputs_embeds is not None: input_shape = tf.shape(inputs_embeds)[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
tf.debugging.assert_less( input_ids, tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), message=( "input_ids must be smaller than the embedding layer's input dimension (got" f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" ), ) inputs_embeds = self.embed_tokens(input_ids) attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) # embed positions filled_past_positions = past_key_values_length if position_ids is None else position_ids[0, -1] positions = self.embed_positions(input_ids, past_key_values_length=filled_past_positions) hidden_states = inputs_embeds + positions hidden_states = self.dropout(hidden_states, training=training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.decoder_layers), message=( f"The {attn_mask_name} should be specified for {len(self.decoder_layers)} layers, but it is" f" for {shape_list(attn_mask)[0]}." ), ) for idx, decoder_layer in enumerate(self.decoder_layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), past_key_value=past_key_value, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare Whisper Model outputting raw hidden-states without any specific head on top.", WHISPER_START_DOCSTRING, ) @keras_serializable class TFWhisperMainLayer(tf.keras.layers.Layer): config_class = WhisperConfig def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.encoder = TFWhisperEncoder(config, name="encoder") self.decoder = TFWhisperDecoder(config, name="decoder") def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = 
value def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import TFWhisperModel, WhisperFeatureExtractor >>> from datasets import load_dataset >>> model = TFWhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_features, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput): encoder_outputs = TFBaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return decoder_outputs + encoder_outputs return TFSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The bare Whisper Model outputting raw hidden-states without any 
specific head on top.", WHISPER_START_DOCSTRING, ) class TFWhisperModel(TFWhisperPreTrainedModel): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(config, **kwargs) self.model = TFWhisperMainLayer(config, name="model") def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder def decoder(self): return self.model.decoder def encoder(self): return self.model.encoder @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import TFWhisperModel, WhisperFeatureExtractor >>> from datasets import load_dataset >>> model = TFWhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" outputs = self.model( input_features=input_features, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqModelOutput( last_hidden_state=output.last_hidden_state, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) @add_start_docstrings( "The Whisper Model with a language modeling head. 
Can be used for automatic speech recognition.", WHISPER_START_DOCSTRING, ) class TFWhisperForConditionalGeneration(TFWhisperPreTrainedModel, TFCausalLanguageModelingLoss): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"encoder.version", r"decoder.version", r"proj_out.weight", ] _keys_to_ignore_on_save = [ r"proj_out.weight", ] def __init__(self, config: WhisperConfig, **kwargs): super().__init__(config, **kwargs) self.model = TFWhisperMainLayer(config, name="model") def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def resize_token_embeddings(self, new_num_tokens: int) -> tf.keras.layers.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) return new_embeddings @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import WhisperProcessor, TFWhisperForConditionalGeneration >>> from datasets import load_dataset >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> generated_ids = model.generate(input_ids=input_features) >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> transcription ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' 
```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_features, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) decoder_last_hidden_state = outputs[0] # Decoder and encoder embeddings are tied lm_logits = tf.matmul(decoder_last_hidden_state, self.get_output_embeddings().weights, transpose_b=True) loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSeq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqLMOutput( logits=output.logits, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) def prepare_inputs_for_generation( self, decoder_input_ids, past=None, use_cache=None, encoder_outputs=None, attention_mask=None, decoder_attention_mask=None, **kwargs ): # cut decoder_input_ids if past is used if past is not None: decoder_input_ids = decoder_input_ids[:, -1:] if decoder_attention_mask is not None: # xla decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:] elif past is not None: # no xla + past decoder_position_ids = past[0][0].shape[2] else: # no xla + no past decoder_position_ids = tf.range(decoder_input_ids.shape[1]) decoder_position_ids = tf.broadcast_to(decoder_position_ids, decoder_input_ids.shape) return { "input_features": None, # Needs to be passed to make Keras.layer.__call__ happy "encoder_outputs": encoder_outputs, "past_key_values": past, "decoder_input_ids": decoder_input_ids, "use_cache": use_cache, "decoder_attention_mask": decoder_attention_mask, "decoder_position_ids": decoder_position_ids, } # @staticmethod def _reorder_cache(past, beam_idx): reordered_past 
= () for layer_past in past: reordered_past += (tuple(tf.gather(past_state, beam_idx) for past_state in layer_past),) return reordered_past
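As an aside, here is a minimal, self-contained sketch of what the `_reorder_cache` helper above does during beam search; the layer count, tensor shapes, and `beam_idx` values are invented for illustration.

```python
import tensorflow as tf

# Hypothetical cache: 2 decoder layers, each holding 4 tensors
# (self-attention key/value and cross-attention key/value) of shape
# (batch, num_heads, seq_len, head_dim).
past = tuple(tuple(tf.random.normal((3, 2, 4, 8)) for _ in range(4)) for _ in range(2))

# Rows chosen by beam search at this decoding step.
beam_idx = tf.constant([2, 0, 1])

# Same gather-per-tensor logic as _reorder_cache: select the cached states that
# belong to the surviving beams so the cache stays aligned with them.
reordered_past = tuple(
    tuple(tf.gather(past_state, beam_idx) for past_state in layer_past) for layer_past in past
)

assert reordered_past[0][0].shape == past[0][0].shape
```

The shapes are unchanged; only the batch dimension is re-indexed, which is why a simple `tf.gather` per cached tensor is enough.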
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./docs/source/en/main_classes/keras_callbacks.mdx
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Keras callbacks When training a Transformers model with Keras, there are some library-specific callbacks available to automate common tasks: ## KerasMetricCallback [[autodoc]] KerasMetricCallback ## PushToHubCallback [[autodoc]] PushToHubCallback
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Keras callbacks When training a Transformers model with Keras, there are some library-specific callbacks available to automate common tasks: ## KerasMetricCallback [[autodoc]] KerasMetricCallback ## PushToHubCallback [[autodoc]] PushToHubCallback
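To illustrate how `KerasMetricCallback` is typically wired into `model.fit()`, here is a hedged sketch using a toy Keras model and a toy `tf.data` dataset rather than a Transformers model; the model, data, and metric function are all invented for illustration, and `PushToHubCallback` is omitted because it additionally requires a Hub login.

```python
import numpy as np
import tensorflow as tf
from transformers.keras_callbacks import KerasMetricCallback

# Toy binary-classification model and dataset, just to exercise the callback API.
model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)

features = np.random.rand(32, 4).astype("float32")
labels = np.random.randint(0, 2, size=(32,))
tf_validation_set = tf.data.Dataset.from_tensor_slices((features, labels)).batch(8)

def compute_metrics(eval_predictions):
    # Receives a (predictions, labels) pair collected over the eval dataset.
    predictions, labels = eval_predictions
    return {"accuracy": float((np.argmax(predictions, axis=-1) == labels).mean())}

metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set)
model.fit(tf_validation_set, epochs=1, callbacks=[metric_callback])
```

With a real Transformers model the wiring is the same: build the callback from a metric function and an evaluation `tf.data.Dataset`, then pass it via `callbacks=`.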
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./scripts/tatoeba/README.md
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> Set up transformers following the instructions in README.md (I would fork first). ```bash git clone git@github.com:huggingface/transformers.git cd transformers pip install -e . pip install pandas GitPython wget ``` Get the required metadata: ```bash curl https://cdn-datasets.huggingface.co/language_codes/language-codes-3b2.csv > language-codes-3b2.csv curl https://cdn-datasets.huggingface.co/language_codes/iso-639-3.csv > iso-639-3.csv ``` Install the Tatoeba-Challenge repo inside transformers: ```bash git clone git@github.com:Helsinki-NLP/Tatoeba-Challenge.git ``` To convert a few models, call the conversion script from the command line: ```bash python src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py --models heb-eng eng-heb --save_dir converted ``` To convert many models, you can pass your list of Tatoeba model names to `resolver.convert_models` in a Python client or script. ```python from transformers.convert_marian_tatoeba_to_pytorch import TatoebaConverter resolver = TatoebaConverter(save_dir='converted') resolver.convert_models(['heb-eng', 'eng-heb']) ``` ### Upload converted models Since version v3.5.0, the model sharing workflow has switched to a git-based system. Refer to the [model sharing doc](https://huggingface.co/transformers/main/model_sharing.html#model-sharing-and-uploading) for more details. To upload all converted models: 1. Install [git-lfs](https://git-lfs.github.com/). 2. Log in with `huggingface-cli`: ```bash huggingface-cli login ``` 3. Run the `upload_models` script: ```bash ./scripts/tatoeba/upload_models.sh ``` ### Modifications - To change the naming logic, change the code near `os.rename`. The model card creation code may also need to change. - To change the model card content, you must modify `TatoebaCodeResolver.write_model_card`.
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> Set up transformers following the instructions in README.md (I would fork first). ```bash git clone git@github.com:huggingface/transformers.git cd transformers pip install -e . pip install pandas GitPython wget ``` Get the required metadata: ```bash curl https://cdn-datasets.huggingface.co/language_codes/language-codes-3b2.csv > language-codes-3b2.csv curl https://cdn-datasets.huggingface.co/language_codes/iso-639-3.csv > iso-639-3.csv ``` Install the Tatoeba-Challenge repo inside transformers: ```bash git clone git@github.com:Helsinki-NLP/Tatoeba-Challenge.git ``` To convert a few models, call the conversion script from the command line: ```bash python src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py --models heb-eng eng-heb --save_dir converted ``` To convert many models, you can pass your list of Tatoeba model names to `resolver.convert_models` in a Python client or script. ```python from transformers.convert_marian_tatoeba_to_pytorch import TatoebaConverter resolver = TatoebaConverter(save_dir='converted') resolver.convert_models(['heb-eng', 'eng-heb']) ``` ### Upload converted models Since version v3.5.0, the model sharing workflow has switched to a git-based system. Refer to the [model sharing doc](https://huggingface.co/transformers/main/model_sharing.html#model-sharing-and-uploading) for more details. To upload all converted models: 1. Install [git-lfs](https://git-lfs.github.com/). 2. Log in with `huggingface-cli`: ```bash huggingface-cli login ``` 3. Run the `upload_models` script: ```bash ./scripts/tatoeba/upload_models.sh ``` ### Modifications - To change the naming logic, change the code near `os.rename`. The model card creation code may also need to change. - To change the model card content, you must modify `TatoebaCodeResolver.write_model_card`.
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/trainer/test_trainer_distributed.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, ) from transformers.utils import logging logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class DummyDataset(Dataset): def __init__(self, length: int = 101): self.length = length def __len__(self): return self.length def __getitem__(self, i) -> int: return i class DummyDataCollator: def __call__(self, features): return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)} class DummyModel(nn.Module): def __init__(self): super().__init__() # Add some (unused) params otherwise DDP will complain. self.fc = nn.Linear(120, 80) def forward(self, input_ids, labels=None): if labels is not None: return torch.tensor(0.0, device=input_ids.device), input_ids else: return input_ids class TestTrainerDistributed(TestCasePlus): @require_torch_multi_gpu def test_trainer(self): distributed_args = f""" -m torch.distributed.launch --nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = [sys.executable] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.launch --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py parser = HfArgumentParser((TrainingArguments,)) training_args = parser.parse_args_into_dataclasses()[0] logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " f"distributed training: {training_args.local_rank != -1}" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: dataset = DummyDataset(dataset_length) def compute_metrics(p: EvalPrediction) -> Dict: sequential = list(range(len(dataset))) success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" ) return {"success": success} trainer = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = 2 metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = None
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, ) from transformers.utils import logging logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class DummyDataset(Dataset): def __init__(self, length: int = 101): self.length = length def __len__(self): return self.length def __getitem__(self, i) -> int: return i class DummyDataCollator: def __call__(self, features): return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)} class DummyModel(nn.Module): def __init__(self): super().__init__() # Add some (unused) params otherwise DDP will complain. self.fc = nn.Linear(120, 80) def forward(self, input_ids, labels=None): if labels is not None: return torch.tensor(0.0, device=input_ids.device), input_ids else: return input_ids class TestTrainerDistributed(TestCasePlus): @require_torch_multi_gpu def test_trainer(self): distributed_args = f""" -m torch.distributed.launch --nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = [sys.executable] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.launch --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py parser = HfArgumentParser((TrainingArguments,)) training_args = parser.parse_args_into_dataclasses()[0] logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " f"distributed training: {training_args.local_rank != -1}" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: dataset = DummyDataset(dataset_length) def compute_metrics(p: EvalPrediction) -> Dict: sequential = list(range(len(dataset))) success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" ) return {"success": success} trainer = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = 2 metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = None
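As a small aside, the `HfArgumentParser((TrainingArguments,))` pattern used in the `__main__` block above can be sketched in isolation; the `--output_dir` value below is a placeholder.

```python
from transformers import HfArgumentParser, TrainingArguments

# Parse command-line style arguments straight into a TrainingArguments dataclass.
parser = HfArgumentParser((TrainingArguments,))
(training_args,) = parser.parse_args_into_dataclasses(args=["--output_dir", "/tmp/dummy_output"])

print(training_args.local_rank, training_args.n_gpu, training_args.device)
```

When launched under `torch.distributed`, `local_rank` is set per process, which is what the test uses to decide which process logs warnings.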
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./examples/research_projects/distillation/lm_seqs_dataset.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Dataset to distilled models adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import numpy as np import torch from torch.utils.data import Dataset from utils import logger class LmSeqsDataset(Dataset): """Custom Dataset wrapping language modeling sequences. Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths. Input: ------ params: `NameSpace` parameters data: `List[np.array[int]] """ def __init__(self, params, data): self.params = params self.token_ids = np.array(data) self.lengths = np.array([len(t) for t in data]) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__(self, index): return (self.token_ids[index], self.lengths[index]) def __len__(self): return len(self.lengths) def check(self): """ Some sanity checks """ assert len(self.token_ids) == len(self.lengths) assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths))) def remove_long_sequences(self): """ Sequences that are too long are split by chunk of max_model_input_size. """ max_len = self.params.max_model_input_size indices = self.lengths > max_len logger.info(f"Splitting {sum(indices)} too long sequences.") def divide_chunks(l, n): return [l[i : i + n] for i in range(0, len(l), n)] new_tok_ids = [] new_lengths = [] if self.params.mlm: cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids, self.lengths): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_) new_lengths.append(len_) else: sub_seqs = [] for sub_s in divide_chunks(seq_, max_len - 2): if sub_s[0] != cls_id: sub_s = np.insert(sub_s, 0, cls_id) if sub_s[-1] != sep_id: sub_s = np.insert(sub_s, len(sub_s), sep_id) assert len(sub_s) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(sub_s) new_tok_ids.extend(sub_seqs) new_lengths.extend([len(l) for l in sub_seqs]) self.token_ids = np.array(new_tok_ids) self.lengths = np.array(new_lengths) def remove_empty_sequences(self): """ Too short sequences are simply removed. This could be tuned. """ init_size = len(self) indices = self.lengths > 11 self.token_ids = self.token_ids[indices] self.lengths = self.lengths[indices] new_size = len(self) logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.") def remove_unknown_sequences(self): """ Remove sequences with a (too) high level of unknown tokens. 
""" if "unk_token" not in self.params.special_tok_ids: return else: unk_token_id = self.params.special_tok_ids["unk_token"] init_size = len(self) unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids]) indices = (unk_occs / self.lengths) < 0.5 self.token_ids = self.token_ids[indices] self.lengths = self.lengths[indices] new_size = len(self) logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).") def print_statistics(self): """ Print some statistics on the corpus. Only the master process. """ if not self.params.is_master: return logger.info(f"{len(self)} sequences") # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def batch_sequences(self, batch): """ Do the padding and transform into torch.tensor. """ token_ids = [t[0] for t in batch] lengths = [t[1] for t in batch] assert len(token_ids) == len(lengths) # Max for paddings max_seq_len_ = max(lengths) # Pad token ids if self.params.mlm: pad_idx = self.params.special_tok_ids["pad_token"] else: pad_idx = self.params.special_tok_ids["unk_token"] tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids] assert len(tk_) == len(token_ids) assert all(len(t) == max_seq_len_ for t in tk_) tk_t = torch.tensor(tk_) # (bs, max_seq_len_) lg_t = torch.tensor(lengths) # (bs) return tk_t, lg_t
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Dataset to distilled models adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import numpy as np import torch from torch.utils.data import Dataset from utils import logger class LmSeqsDataset(Dataset): """Custom Dataset wrapping language modeling sequences. Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths. Input: ------ params: `NameSpace` parameters data: `List[np.array[int]] """ def __init__(self, params, data): self.params = params self.token_ids = np.array(data) self.lengths = np.array([len(t) for t in data]) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__(self, index): return (self.token_ids[index], self.lengths[index]) def __len__(self): return len(self.lengths) def check(self): """ Some sanity checks """ assert len(self.token_ids) == len(self.lengths) assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths))) def remove_long_sequences(self): """ Sequences that are too long are split by chunk of max_model_input_size. """ max_len = self.params.max_model_input_size indices = self.lengths > max_len logger.info(f"Splitting {sum(indices)} too long sequences.") def divide_chunks(l, n): return [l[i : i + n] for i in range(0, len(l), n)] new_tok_ids = [] new_lengths = [] if self.params.mlm: cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids, self.lengths): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_) new_lengths.append(len_) else: sub_seqs = [] for sub_s in divide_chunks(seq_, max_len - 2): if sub_s[0] != cls_id: sub_s = np.insert(sub_s, 0, cls_id) if sub_s[-1] != sep_id: sub_s = np.insert(sub_s, len(sub_s), sep_id) assert len(sub_s) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(sub_s) new_tok_ids.extend(sub_seqs) new_lengths.extend([len(l) for l in sub_seqs]) self.token_ids = np.array(new_tok_ids) self.lengths = np.array(new_lengths) def remove_empty_sequences(self): """ Too short sequences are simply removed. This could be tuned. """ init_size = len(self) indices = self.lengths > 11 self.token_ids = self.token_ids[indices] self.lengths = self.lengths[indices] new_size = len(self) logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.") def remove_unknown_sequences(self): """ Remove sequences with a (too) high level of unknown tokens. 
""" if "unk_token" not in self.params.special_tok_ids: return else: unk_token_id = self.params.special_tok_ids["unk_token"] init_size = len(self) unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids]) indices = (unk_occs / self.lengths) < 0.5 self.token_ids = self.token_ids[indices] self.lengths = self.lengths[indices] new_size = len(self) logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).") def print_statistics(self): """ Print some statistics on the corpus. Only the master process. """ if not self.params.is_master: return logger.info(f"{len(self)} sequences") # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def batch_sequences(self, batch): """ Do the padding and transform into torch.tensor. """ token_ids = [t[0] for t in batch] lengths = [t[1] for t in batch] assert len(token_ids) == len(lengths) # Max for paddings max_seq_len_ = max(lengths) # Pad token ids if self.params.mlm: pad_idx = self.params.special_tok_ids["pad_token"] else: pad_idx = self.params.special_tok_ids["unk_token"] tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids] assert len(tk_) == len(token_ids) assert all(len(t) == max_seq_len_ for t in tk_) tk_t = torch.tensor(tk_) # (bs, max_seq_len_) lg_t = torch.tensor(lengths) # (bs) return tk_t, lg_t
-1
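To show how `LmSeqsDataset` is meant to be consumed, here is a hedged sketch that wraps it in a PyTorch `DataLoader` with `batch_sequences` as the collate function; the `params` namespace and the toy token sequences are invented, and the import assumes the code runs from inside the distillation example directory (the module itself imports `logger` from the local `utils`).

```python
from argparse import Namespace

import numpy as np
from torch.utils.data import DataLoader

from lm_seqs_dataset import LmSeqsDataset  # assumes the distillation example dir is on sys.path

# Invented params; the real ones come from the distillation training script.
params = Namespace(
    max_model_input_size=512,
    mlm=True,
    special_tok_ids={"cls_token": 101, "sep_token": 102, "pad_token": 0, "unk_token": 100},
    is_master=True,
)

# Toy corpus: 64 sequences of 32 token ids, each wrapped in [CLS] ... [SEP].
data = [np.array([101] + [7] * 30 + [102]) for _ in range(64)]
dataset = LmSeqsDataset(params, data)

# batch_sequences pads every batch to the length of its longest sequence.
loader = DataLoader(dataset, batch_size=8, collate_fn=dataset.batch_sequences)
token_ids, lengths = next(iter(loader))  # shapes: (8, 32) and (8,)
```

Sequences of at most 11 tokens, or with at least 50% unknown tokens, are dropped during construction, as the filtering methods above describe.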
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/models/ctrl/__init__.py
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OpenAI GPT checkpoint.""" import argparse import torch from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2 from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path): # Construct model if gpt2_config_file == "": config = GPT2Config() else: config = GPT2Config.from_json_file(gpt2_config_file) model = GPT2Model(config) # Load weights from numpy load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path) # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME print(f"Save PyTorch model to {pytorch_weights_dump_path}") torch.save(model.state_dict(), pytorch_weights_dump_path) print(f"Save configuration file to {pytorch_config_dump_path}") with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(config.to_json_string()) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--gpt2_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained OpenAI model. \n" "This specifies the model architecture." ), ) args = parser.parse_args() convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OpenAI GPT checkpoint.""" import argparse import torch from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2 from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path): # Construct model if gpt2_config_file == "": config = GPT2Config() else: config = GPT2Config.from_json_file(gpt2_config_file) model = GPT2Model(config) # Load weights from numpy load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path) # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME print(f"Save PyTorch model to {pytorch_weights_dump_path}") torch.save(model.state_dict(), pytorch_weights_dump_path) print(f"Save configuration file to {pytorch_config_dump_path}") with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(config.to_json_string()) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--gpt2_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained OpenAI model. \n" "This specifies the model architecture." ), ) args = parser.parse_args() convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
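For completeness, a hedged sketch of invoking the conversion function directly from Python rather than through the CLI flags defined above; the checkpoint and output paths are placeholders, and TensorFlow must be installed for the weight loading to work.

```python
from transformers.models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
    convert_gpt2_checkpoint_to_pytorch,
)

# Placeholder paths: point these at a real TF checkpoint and an existing output directory.
convert_gpt2_checkpoint_to_pytorch(
    gpt2_checkpoint_path="path/to/tf_checkpoint",
    gpt2_config_file="",  # an empty string falls back to the default GPT2Config()
    pytorch_dump_folder_path="path/to/output_dir",
)
```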
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
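Since the query above concerns the pipeline documentation, here is a brief sketch of the high-level `pipeline()` API those docs cover; the task and input sentence are arbitrary illustrative choices, not taken from the PR.

```python
from transformers import pipeline

# Build a task-specific pipeline; with no model given, the task's default checkpoint is used.
classifier = pipeline("sentiment-analysis")

# Run inference on an arbitrary example sentence.
print(classifier("Removing the double brackets made the docs read much better."))
```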
./src/transformers/models/bert_generation/modeling_bert_generation.py
# coding=utf-8 # Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model specific for generation.""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_bert_generation import BertGenerationConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/bert_for_seq_generation_L-24_bbc_encoder" _CONFIG_FOR_DOC = "BertGenerationConfig" _TOKENIZER_FOR_DOC = "BertGenerationTokenizer" # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BertGeneration class BertGenerationSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->BertGeneration class BertGenerationSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def 
transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertGenerationModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BertGeneration class BertGenerationAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = BertGenerationSelfAttention(config, position_embedding_type=position_embedding_type) self.output = BertGenerationSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BertGeneration class BertGenerationIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BertGeneration class BertGenerationOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = 
self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->BertGeneration class BertGenerationLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BertGenerationAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = BertGenerationAttention(config, position_embedding_type="absolute") self.intermediate = BertGenerationIntermediate(config) self.output = BertGenerationOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->BertGeneration class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = 
nn.ModuleList([BertGenerationLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) def load_tf_weights_in_bert_generation( model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False ): try: import numpy as np import tensorflow.compat.v1 as tf import tensorflow_hub as hub import tensorflow_text # noqa: F401 tf.disable_eager_execution() except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_model = hub.Module(tf_hub_path) init = tf.global_variables_initializer() with tf.Session() as sess: init.run() all_variables = tf_model.variable_map keep_track_variables = all_variables.copy() for key in list(all_variables.keys()): if "global" in key: logger.info(f"Skipping {key}...") continue if not is_encoder: model_pointer = getattr(model, model_class) else: model_pointer = model is_embedding = False logger.info(f"Trying to match {key}...") # remove start_string = "module/bert/" sub_layers = key.split("/")[2:] if is_encoder_named_decoder and sub_layers[0] == "encoder": logger.info(f"Skipping encoder layer {key} for decoder") continue if is_encoder and sub_layers[0] == "decoder": logger.info(f"Skipping decoder layer {key} for encoder") continue for i, sub_layer in enumerate(sub_layers): if sub_layer == "embeddings": is_embedding = True elif sub_layer == "LayerNorm": is_embedding = False if "layer" in sub_layer: model_pointer = model_pointer.layer[int(sub_layer.split("_")[-1])] elif sub_layer in ["kernel", "gamma"]: model_pointer = model_pointer.weight elif sub_layer == "beta": model_pointer = model_pointer.bias elif sub_layer == "encdec": model_pointer = model_pointer.crossattention.self elif sub_layer == "encdec_output": model_pointer = model_pointer.crossattention.output elif is_encoder_named_decoder and sub_layer == "decoder": model_pointer = model_pointer.encoder else: if sub_layer == "attention" and "encdec" in sub_layers[i + 1]: continue try: model_pointer = getattr(model_pointer, sub_layer) except AttributeError: logger.info(f"Skipping to initialize {key} at {sub_layer}...") raise AttributeError array = np.asarray(sess.run(all_variables[key])) if not is_embedding: logger.info(f"Transposing numpy weight of shape {array.shape} for {key}") array = np.transpose(array) else: model_pointer = model_pointer.weight if model_pointer.shape != array.shape: raise ValueError(f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched") logger.info(f"Initialize PyTorch weight {key}") model_pointer.data = torch.from_numpy(array.astype(np.float32)) keep_track_variables.pop(key, None) logger.info(f"Weights not copied to PyTorch model: {', '.join(keep_track_variables.keys())}") return model class BertGenerationEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) 
embeddings = inputs_embeds + position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertGenerationPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertGenerationConfig base_model_prefix = "bert" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, BertEncoder): module.gradient_checkpointing = value BERT_GENERATION_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BertGenerationConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BERT_GENERATION_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertGenerationTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.", BERT_GENERATION_START_DOCSTRING, ) class BertGenerationEncoder(BertGenerationPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. This model should be used when leveraging Bert or Roberta checkpoints for the [`EncoderDecoderModel`] class as described in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertGenerationEmbeddings(config) self.encoder = BertEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: `1` for tokens that are NOT MASKED, `0` for MASKED tokens. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = None if not use_cache: extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class BertGenerationOnlyLMHead(nn.Module): def __init__(self, config): super().__init__() self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = 
nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, hidden_states): logits = self.decoder(hidden_states) return logits def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias @add_start_docstrings( """BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_GENERATION_START_DOCSTRING, ) class BertGenerationDecoder(BertGenerationPreTrainedModel): _keys_to_ignore_on_load_missing = ["lm_head.decoder.weight", "lm_head.decoder.bias", "embeddings.position_ids"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`") self.bert = BertGenerationEncoder(config) self.lm_head = BertGenerationOnlyLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Returns: Example: ```python >>> from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig >>> import torch >>> tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") >>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") >>> config.is_decoder = True >>> model = BertGenerationDecoder.from_pretrained( ... "google/bert_for_seq_generation_L-24_bbc_encoder", config=config ... ) >>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past} def _reorder_cache(self, past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past
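As the docstrings above note, `BertGenerationEncoder` and `BertGenerationDecoder` are meant to be paired through [`EncoderDecoderModel`]. A minimal sketch of that pairing follows, reusing the checkpoint name already referenced by `_CHECKPOINT_FOR_DOC`; weights are downloaded at runtime, so treat this as illustrative rather than a prescribed recipe.

```python
from transformers import (
    BertGenerationDecoder,
    BertGenerationEncoder,
    BertGenerationTokenizer,
    EncoderDecoderModel,
)

# Reuse the same pretrained checkpoint for both sides; the decoder additionally
# enables causal masking and cross-attention so it can attend to the encoder.
encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
decoder = BertGenerationDecoder.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder", add_cross_attention=True, is_decoder=True
)
model = EncoderDecoderModel(encoder=encoder, decoder=decoder)

tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
inputs = tokenizer("This is a long article to summarize", return_token_type_ids=False, return_tensors="pt")

# Feed the same ids as encoder input and decoder input purely for illustration.
outputs = model(input_ids=inputs.input_ids, decoder_input_ids=inputs.input_ids)
```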
# coding=utf-8 # Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model specific for generation.""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_bert_generation import BertGenerationConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/bert_for_seq_generation_L-24_bbc_encoder" _CONFIG_FOR_DOC = "BertGenerationConfig" _TOKENIZER_FOR_DOC = "BertGenerationTokenizer" # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BertGeneration class BertGenerationSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->BertGeneration class BertGenerationSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def 
transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertGenerationModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BertGeneration class BertGenerationAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = BertGenerationSelfAttention(config, position_embedding_type=position_embedding_type) self.output = BertGenerationSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BertGeneration class BertGenerationIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BertGeneration class BertGenerationOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = 
self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->BertGeneration class BertGenerationLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BertGenerationAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = BertGenerationAttention(config, position_embedding_type="absolute") self.intermediate = BertGenerationIntermediate(config) self.output = BertGenerationOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->BertGeneration class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = 
nn.ModuleList([BertGenerationLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) def load_tf_weights_in_bert_generation( model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False ): try: import numpy as np import tensorflow.compat.v1 as tf import tensorflow_hub as hub import tensorflow_text # noqa: F401 tf.disable_eager_execution() except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_model = hub.Module(tf_hub_path) init = tf.global_variables_initializer() with tf.Session() as sess: init.run() all_variables = tf_model.variable_map keep_track_variables = all_variables.copy() for key in list(all_variables.keys()): if "global" in key: logger.info(f"Skipping {key}...") continue if not is_encoder: model_pointer = getattr(model, model_class) else: model_pointer = model is_embedding = False logger.info(f"Trying to match {key}...") # remove start_string = "module/bert/" sub_layers = key.split("/")[2:] if is_encoder_named_decoder and sub_layers[0] == "encoder": logger.info(f"Skipping encoder layer {key} for decoder") continue if is_encoder and sub_layers[0] == "decoder": logger.info(f"Skipping decoder layer {key} for encoder") continue for i, sub_layer in enumerate(sub_layers): if sub_layer == "embeddings": is_embedding = True elif sub_layer == "LayerNorm": is_embedding = False if "layer" in sub_layer: model_pointer = model_pointer.layer[int(sub_layer.split("_")[-1])] elif sub_layer in ["kernel", "gamma"]: model_pointer = model_pointer.weight elif sub_layer == "beta": model_pointer = model_pointer.bias elif sub_layer == "encdec": model_pointer = model_pointer.crossattention.self elif sub_layer == "encdec_output": model_pointer = model_pointer.crossattention.output elif is_encoder_named_decoder and sub_layer == "decoder": model_pointer = model_pointer.encoder else: if sub_layer == "attention" and "encdec" in sub_layers[i + 1]: continue try: model_pointer = getattr(model_pointer, sub_layer) except AttributeError: logger.info(f"Skipping to initialize {key} at {sub_layer}...") raise AttributeError array = np.asarray(sess.run(all_variables[key])) if not is_embedding: logger.info(f"Transposing numpy weight of shape {array.shape} for {key}") array = np.transpose(array) else: model_pointer = model_pointer.weight if model_pointer.shape != array.shape: raise ValueError(f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched") logger.info(f"Initialize PyTorch weight {key}") model_pointer.data = torch.from_numpy(array.astype(np.float32)) keep_track_variables.pop(key, None) logger.info(f"Weights not copied to PyTorch model: {', '.join(keep_track_variables.keys())}") return model class BertGenerationEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) 
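# Note: unlike BERT's embeddings, there are no token_type embeddings here; the output below is simply the sum of the word and absolute position embeddings, passed through LayerNorm and dropout.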
embeddings = inputs_embeds + position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertGenerationPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertGenerationConfig base_model_prefix = "bert" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, BertEncoder): module.gradient_checkpointing = value BERT_GENERATION_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BertGenerationConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BERT_GENERATION_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertGenerationTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.", BERT_GENERATION_START_DOCSTRING, ) class BertGenerationEncoder(BertGenerationPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. This model should be used when leveraging Bert or Roberta checkpoints for the [`EncoderDecoderModel`] class as described in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn. To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and `add_cross_attention` set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertGenerationEmbeddings(config) self.encoder = BertEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: `1` for tokens that are NOT MASKED, `0` for MASKED tokens. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = None if not use_cache: extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class BertGenerationOnlyLMHead(nn.Module): def __init__(self, config): super().__init__() self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = 
nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, hidden_states): logits = self.decoder(hidden_states) return logits def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias @add_start_docstrings( """BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_GENERATION_START_DOCSTRING, ) class BertGenerationDecoder(BertGenerationPreTrainedModel): _keys_to_ignore_on_load_missing = ["lm_head.decoder.weight", "lm_head.decoder.bias", "embeddings.position_ids"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`") self.bert = BertGenerationEncoder(config) self.lm_head = BertGenerationOnlyLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Returns: Example: ```python >>> from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig >>> import torch >>> tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") >>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") >>> config.is_decoder = True >>> model = BertGenerationDecoder.from_pretrained( ... "google/bert_for_seq_generation_L-24_bbc_encoder", config=config ... ) >>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past} def _reorder_cache(self, past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past
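A minimal usage sketch (not part of the original file) showing how the encoder and decoder defined above are typically paired through [`EncoderDecoderModel`], as the `BertGenerationEncoder` docstring describes; it assumes the public `google/bert_for_seq_generation_L-24_bbc_encoder` checkpoint referenced in the docstrings is available:

```python
from transformers import (
    BertGenerationDecoder,
    BertGenerationEncoder,
    BertGenerationTokenizer,
    EncoderDecoderModel,
)

# Build a seq2seq model from two bert-generation checkpoints: the decoder is loaded
# with `is_decoder=True` and `add_cross_attention=True`, as required by the classes above.
encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
decoder = BertGenerationDecoder.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder", add_cross_attention=True, is_decoder=True
)
model = EncoderDecoderModel(encoder=encoder, decoder=decoder)

# The tokenizer produces no token_type_ids, matching the embeddings defined in this file.
tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt")

# Passing labels makes the decoder compute the shifted next-token prediction loss.
outputs = model(input_ids=inputs.input_ids, decoder_input_ids=inputs.input_ids, labels=inputs.input_ids)
loss = outputs.loss
```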
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/longt5/modeling_longt5.py
# coding=utf-8 # Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch LongT5 model.""" import copy import math import warnings from typing import Any, List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.utils.checkpoint import checkpoint from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import ALL_LAYERNORM_LAYERS, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_longt5 import LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LongT5Config" _TOKENIZER_FOR_DOC = "T5Tokenizer" _CHECKPOINT_FOR_DOC = "google/long-t5-local-base" # TODO: Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/long-t5-local-base", "google/long-t5-local-large", "google/long-t5-tglobal-base", "google/long-t5-tglobal-large", ] def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor: """Pad a tensor so that a sequence length will be a multiple of `block_len`""" pad_len = -x.shape[dim] % block_len # Handle cases when an empty input sequence is given if not all(x.shape): new_shape = list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)] * x.ndim pad[dim] = (0, pad_len) pad = sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value) return x def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor: """Split an input tensor into blocks of a given `block_len` along the given `dim`. If the dimension length is not a multiple of `block_len`, it will be padded first with selected `pad_value`. """ # pad tensor to multiple of block_len if x.shape[dim] % block_len != 0: x = _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks = x.shape[dim] // block_len output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :] # If 0 is in output_shape, we cannot apply reshape because of incompatibility with ONNX conversion if 0 in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor: """Concatenate three consecutive blocks for each input block for local attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf. 
""" num_blocks = x.shape[block_dim] pad = [(0, 0)] * x.ndim pad[block_dim] = (1, 1) pad = sum(pad[::-1], ()) # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len] x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value) blocks_list: List[torch.Tensor] = [] for i in range(3): # We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim indices[block_dim] = slice(i, i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 * block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: """Makes 3-blocked relative position ids for local attention.""" position_ids = torch.arange(3 * block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len, 3 * block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor: """Mask local attention mask to enforce that tokens are not allowed to attend tokens farther than ``local_radius.""" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, None, :, :] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor: """Prepare attention mask to be applied for a local attention.""" # [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, num_block, 3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int ) -> Tuple[torch.Tensor, torch.Tensor]: """Obtain the "fixed block" global id corresponding to each input token. This implementation is a simlified version of the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens which do not make for the whole fixed block, are assigned to the preceding block. Padding tokens from the original sequence are represented by -1. 
""" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1 block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set padding tokens to -1 global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size # [batch_size, seq_len // global_block_size] if num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor: """Create the relative position tensor for local -> global attention.""" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions - block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int ) -> torch.Tensor: """Compute individual block aggregates by summing over individual blocks.""" # (batch..., seq_len, global_seq_len)) block_ids = block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1] return torch.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean. 
""" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # LongT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm") except ImportError: # using the normal LongT5LayerNorm pass except Exception: logger.warning("discovered apex but it failed to load, falling back to LongT5LayerNorm") pass ALL_LAYERNORM_LAYERS.append(LongT5LayerNorm) # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = 
config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value) == 2 ), f"past_key_value should have 2 past states: keys and values. 
Got { len(past_key_value)} past states" real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): """reshape""" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = 
has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): """Compute binned relative position bias""" memory_position = torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): """reshape""" return states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores scores = torch.einsum( "...qhd,...khd->...hqk", query_states, key_states ) # (batch_size, num_block, n_heads, block_len, 3 * block_len) if position_bias is None: # position_bias 
shape: # (1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if mask is not None: # Replace masked positions with -1e10 (according to the original implementation) mask = torch.where(mask > 0, 0.0, -1e10) # We need to adjust position bias shape to be sum with mask position_bias = position_bias + mask.transpose(1, 2) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Relativen attention bias & Layer norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = 
self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): """Compute binned relative position bias""" memory_position = torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size, 1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...] 
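# side_attention_mask is True where the token's attention-mask value equals the block's segment id, i.e. real (non-padding) tokens may attend real global blocks; mismatched pairs receive a large negative bias below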
attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10) # (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, global_seq_len) side_bias = side_bias.permute([0, 3, 1, 2]) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias return attention_side_bias def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): """reshape""" return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components for transient-global attention # Obtain block_ids and global_segment_ids # global_seq_len := seq_len // self.global_block_size # shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get query states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side inputs across local key/value blocks # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) # Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len) scores = torch.einsum("...qhd,...khd->...hqk", query_states, key_states) if mask is not None: # We need to adjust 
position bias shape to be sum with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked positions with -10_000 (according to the original implementation) local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10) else: local_attention_mask = None if position_bias is None: # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is not None: # (batch_size, 1, n_heads, block_len, 3 * block_len) position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) # Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len) if mask is None: mask = torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads, block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class LongT5LayerLocalSelfAttention(nn.Module): """Local self attention used in encoder""" def __init__(self, config, 
has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): """Transient-Global self attention used in encoder""" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type == "local": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == "transient-global": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError( "For encoder attention mechanism, either `local` 
or `transient-global` attention type is expected, " f"but got {config.encoder_attention_type}." ) self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.") expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. 
Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = LongT5Config base_model_prefix = "transformer" supports_gradient_checkpointing = True @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) ** -0.5) ) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing = value # Copied from 
transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( "self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the" " pad_token_id. See LongT5 docs for more information" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if inputs_embeds is None: assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of 
past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is True: assert self.is_decoder, f"`use_cache` can only be set to `True` if {self} is used as a decoder" if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. # We use local attention in encoder self-attention, otherwise standard self & cross attentions are used if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type == "local": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we need to use both local attention mask and standard extended mask for transient-global attention extended_attention_mask = attention_mask # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions)) return custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, 
encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r""" The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LongT5Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LONGT5_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. 
LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ LONGT5_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. """ @add_start_docstrings( "The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r"encoder.embed_tokens.weight", r"decoder.embed_tokens.weight", ] _keys_to_ignore_on_load_unexpected = [ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" Returns: Example: ```python >>> from transformers import T5Tokenizer, LongT5Model >>> tokenizer = T5Tokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5Model.from_pretrained("google/long-t5-local-base") >>> # Let's try a very long encoder input. >>> input_ids = tokenizer( ... 100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... 
).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r"encoder.embed_tokens.weight", r"decoder.embed_tokens.weight", r"lm_head.weight", ] _keys_to_ignore_on_load_unexpected = [ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, 
new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps" ... ) >>> # Let's try a very long input. 
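        >>> # LongT5's efficient (local / transient-global) encoder attention makes inputs of this length tractable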
>>> inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt") >>> input_ids = inputs.input_ids >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is to provide an overview of the literature on the role of dog ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut 
decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return { "decoder_input_ids": input_ids, "past_key_values": past, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past reordered_decoder_past = () for layer_past_states in past: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( "The bare LONGT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.", LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [r"encoder.embed_tokens.weight"] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base") >>> input_ids = tokenizer( ... 100 * "Studies have been shown that owning a dog is good for you ", return_tensors="pt" ... 
).input_ids  # Batch size 1
        >>> outputs = model(input_ids=input_ids)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_outputs = self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        return encoder_outputs
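# Illustrative sketch: a minimal, hedged example of how the block-local attention helpers of this
# module reshape an input sequence. The tensor sizes below are made up purely for illustration; the
# helper functions and their signatures are the ones defined in this module, assumed to be importable
# from `transformers.models.longt5.modeling_longt5`.
import torch

from transformers.models.longt5.modeling_longt5 import (
    _concatenate_3_blocks,
    _get_local_attention_mask,
    _split_into_blocks,
)

batch_size, seq_len, d_model, block_len = 2, 300, 8, 128
hidden = torch.randn(batch_size, seq_len, d_model)
mask = torch.ones(batch_size, seq_len)

# (batch_size, num_blocks, block_len, d_model); seq_len is first padded up to a multiple of block_len
blocked = _split_into_blocks(hidden, block_len, dim=1)
# (batch_size, num_blocks, 3 * block_len, d_model): each block is concatenated with its left and right neighbours
neighbours = _concatenate_3_blocks(blocked, block_dim=1, sequence_dim=2)
# (batch_size, 1, num_blocks, block_len, 3 * block_len): boolean mask restricting attention to the local radius
local_mask = _get_local_attention_mask(mask, block_len, hidden.device)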
# coding=utf-8 # Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch LongT5 model.""" import copy import math import warnings from typing import Any, List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.utils.checkpoint import checkpoint from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import ALL_LAYERNORM_LAYERS, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_longt5 import LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LongT5Config" _TOKENIZER_FOR_DOC = "T5Tokenizer" _CHECKPOINT_FOR_DOC = "google/long-t5-local-base" # TODO: Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/long-t5-local-base", "google/long-t5-local-large", "google/long-t5-tglobal-base", "google/long-t5-tglobal-large", ] def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor: """Pad a tensor so that a sequence length will be a multiple of `block_len`""" pad_len = -x.shape[dim] % block_len # Handle cases when an empty input sequence is given if not all(x.shape): new_shape = list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)] * x.ndim pad[dim] = (0, pad_len) pad = sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value) return x def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor: """Split an input tensor into blocks of a given `block_len` along the given `dim`. If the dimension length is not a multiple of `block_len`, it will be padded first with selected `pad_value`. """ # pad tensor to multiple of block_len if x.shape[dim] % block_len != 0: x = _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks = x.shape[dim] // block_len output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :] # If 0 is in output_shape, we cannot apply reshape because of incompatibility with ONNX conversion if 0 in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor: """Concatenate three consecutive blocks for each input block for local attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf. 
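    Each block in the output is the concatenation of the previous, current and next input block, so the blocked
    dimension keeps `num_blocks` entries while the sequence dimension grows to `3 * block_len`.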
""" num_blocks = x.shape[block_dim] pad = [(0, 0)] * x.ndim pad[block_dim] = (1, 1) pad = sum(pad[::-1], ()) # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len] x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value) blocks_list: List[torch.Tensor] = [] for i in range(3): # We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim indices[block_dim] = slice(i, i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 * block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: """Makes 3-blocked relative position ids for local attention.""" position_ids = torch.arange(3 * block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len, 3 * block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor: """Mask local attention mask to enforce that tokens are not allowed to attend tokens farther than ``local_radius.""" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, None, :, :] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor: """Prepare attention mask to be applied for a local attention.""" # [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, num_block, 3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int ) -> Tuple[torch.Tensor, torch.Tensor]: """Obtain the "fixed block" global id corresponding to each input token. This implementation is a simlified version of the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens which do not make for the whole fixed block, are assigned to the preceding block. Padding tokens from the original sequence are represented by -1. 
""" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1 block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set padding tokens to -1 global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size # [batch_size, seq_len // global_block_size] if num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor: """Create the relative position tensor for local -> global attention.""" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions - block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int ) -> torch.Tensor: """Compute individual block aggregates by summing over individual blocks.""" # (batch..., seq_len, global_seq_len)) block_ids = block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1] return torch.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean. 
""" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # LongT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm") except ImportError: # using the normal LongT5LayerNorm pass except Exception: logger.warning("discovered apex but it failed to load, falling back to LongT5LayerNorm") pass ALL_LAYERNORM_LAYERS.append(LongT5LayerNorm) # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = 
config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value) == 2 ), f"past_key_value should have 2 past states: keys and values. 
Got { len(past_key_value)} past states" real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): """reshape""" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = 
has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): """Compute binned relative position bias""" memory_position = torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): """reshape""" return states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores scores = torch.einsum( "...qhd,...khd->...hqk", query_states, key_states ) # (batch_size, num_block, n_heads, block_len, 3 * block_len) if position_bias is None: # position_bias 
shape: # (1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if mask is not None: # Replace masked positions with -1e10 (according to the original implementation) mask = torch.where(mask > 0, 0.0, -1e10) # We need to adjust position bias shape to be sum with mask position_bias = position_bias + mask.transpose(1, 2) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Relativen attention bias & Layer norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = 
self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): """Compute binned relative position bias""" memory_position = torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size, 1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...] 
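        # Convert the boolean token <-> global-block match mask into an additive bias:
        # 0.0 where attention is allowed and -1e10 where it is masked out.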
attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10) # (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, global_seq_len) side_bias = side_bias.permute([0, 3, 1, 2]) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias return attention_side_bias def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): """reshape""" return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components for transient-global attention # Obtain block_ids and global_segment_ids # global_seq_len := seq_len // self.global_block_size # shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get query states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side inputs across local key/value blocks # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) # Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len) scores = torch.einsum("...qhd,...khd->...hqk", query_states, key_states) if mask is not None: # We need to adjust 
position bias shape to be sum with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked positions with -10_000 (according to the original implementation) local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10) else: local_attention_mask = None if position_bias is None: # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is not None: # (batch_size, 1, n_heads, block_len, 3 * block_len) position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) # Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len) if mask is None: mask = torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads, block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class LongT5LayerLocalSelfAttention(nn.Module): """Local self attention used in encoder""" def __init__(self, config, 
has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): """Transient-Global self attention used in encoder""" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type == "local": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == "transient-global": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError( "For encoder attention mechanism, either `local` 
or `transient-global` attention type is expected, " f"but got {config.encoder_attention_type}." ) self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.") expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. 
Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
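    Besides weight initialization (see `_init_weights`), it also exposes the `dummy_inputs` property and the
    `_shift_right` helper used to prepare `decoder_input_ids`.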
""" config_class = LongT5Config base_model_prefix = "transformer" supports_gradient_checkpointing = True @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) ** -0.5) ) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing = value # Copied from 
transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( "self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the" " pad_token_id. See LongT5 docs for more information" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if inputs_embeds is None: assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of 
past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is True: assert self.is_decoder, f"`use_cache` can only be set to `True` if {self} is used as a decoder" if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. # We use local attention in encoder self-attention, otherwise standard self & cross attentions are used if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type == "local": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we need to use both local attention mask and standard extended mask for transient-global attention extended_attention_mask = attention_mask # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions)) return custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, 
encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r""" The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LongT5Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LONGT5_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. 
LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ LONGT5_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. """ @add_start_docstrings( "The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r"encoder.embed_tokens.weight", r"decoder.embed_tokens.weight", ] _keys_to_ignore_on_load_unexpected = [ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" Returns: Example: ```python >>> from transformers import T5Tokenizer, LongT5Model >>> tokenizer = T5Tokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5Model.from_pretrained("google/long-t5-local-base") >>> # Let's try a very long encoder input. >>> input_ids = tokenizer( ... 100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... 
).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r"encoder.embed_tokens.weight", r"decoder.embed_tokens.weight", r"lm_head.weight", ] _keys_to_ignore_on_load_unexpected = [ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, 
new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps" ... ) >>> # Let's try a very long input. 
>>> inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt") >>> input_ids = inputs.input_ids >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is to provide an overview of the literature on the role of dog ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut 
decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return { "decoder_input_ids": input_ids, "past_key_values": past, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past reordered_decoder_past = () for layer_past_states in past: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( "The bare LONGT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.", LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [r"encoder.embed_tokens.weight"] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base") >>> input_ids = tokenizer( ... 100 * "Studies have been shown that owning a dog is good for you ", return_tensors="pt" ... 
).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs
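# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of modeling_longt5.py): how beam indices
# are applied to cached key/value states during beam search, mirroring the
# `index_select` logic of the `_reorder_cache` method above. The tensor shapes
# are hypothetical (3 beams, 8 heads, 5 cached positions, head dim 64), and
# the four tensors per layer stand for self-attention and cross-attention
# key/value caches.
# ---------------------------------------------------------------------------
import torch

beam_idx = torch.tensor([2, 0, 1])  # new ordering of the beams after a search step
layer_past = tuple(torch.randn(3, 8, 5, 64) for _ in range(4))  # (self k, self v, cross k, cross v)

reordered_layer_past = tuple(
    past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past
)

# shape is preserved; only the batch (beam) dimension is permuted
assert reordered_layer_past[0].shape == layer_past[0].shape
assert torch.equal(reordered_layer_past[0], layer_past[0][beam_idx])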
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/luke/modeling_luke.py
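# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of modeling_luke.py): minimal usage of the
# LUKE base model defined below. The example sentence and entity span are
# assumptions for demonstration; the checkpoint name comes from the archive
# list in the file. LUKE consumes word tokens plus entity spans and returns
# separate hidden states for words and entities.
# ---------------------------------------------------------------------------
from transformers import LukeModel, LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
model = LukeModel.from_pretrained("studio-ousia/luke-base")

text = "Beyoncé lives in Los Angeles."
entity_spans = [(0, 7)]  # character span of "Beyoncé" in `text` (hypothetical example)

inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
outputs = model(**inputs)

word_hidden_states = outputs.last_hidden_state           # (1, seq_len, hidden_size)
entity_hidden_states = outputs.entity_last_hidden_state  # (1, 1, hidden_size)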
# coding=utf-8 # Copyright Studio Ousia and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch LUKE model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_luke import LukeConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LukeConfig" _TOKENIZER_FOR_DOC = "LukeTokenizer" _CHECKPOINT_FOR_DOC = "studio-ousia/luke-base" LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = [ "studio-ousia/luke-base", "studio-ousia/luke-large", # See all LUKE models at https://huggingface.co/models?filter=luke ] @dataclass class BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling): """ Base class for outputs of the LUKE model. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length + entity_length, sequence_length + entity_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" entity_last_hidden_state: torch.FloatTensor = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BaseLukeModelOutput(BaseModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ entity_last_hidden_state: torch.FloatTensor = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeMaskedLMOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): The sum of masked language modeling (MLM) loss and entity prediction loss. mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked entity prediction (MEP) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). entity_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None mlm_loss: Optional[torch.FloatTensor] = None mep_loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None entity_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class EntityClassificationOutput(ModelOutput): """ Outputs of entity classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class EntityPairClassificationOutput(ModelOutput): """ Outputs of entity pair classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification scores (before SoftMax). 
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class EntitySpanClassificationOutput(ModelOutput): """ Outputs of entity span classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, entity_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeSequenceClassifierOutput(ModelOutput): """ Outputs of sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. 
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeTokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeQuestionAnsweringModelOutput(ModelOutput): """ Outputs of question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeMultipleChoiceModelOutput(ModelOutput): """ Outputs of multiple choice models. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. 
Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class LukeEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class LukeEntityEmbeddings(nn.Module): def __init__(self, config: LukeConfig): super().__init__() self.config = config self.entity_embeddings = nn.Embedding(config.entity_vocab_size, config.entity_emb_size, padding_idx=0) if config.entity_emb_size != config.hidden_size: self.entity_embedding_dense = nn.Linear(config.entity_emb_size, config.hidden_size, bias=False) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, entity_ids: torch.LongTensor, position_ids: torch.LongTensor, token_type_ids: torch.LongTensor = None ): if token_type_ids is None: token_type_ids = torch.zeros_like(entity_ids) entity_embeddings = self.entity_embeddings(entity_ids) if self.config.entity_emb_size != self.config.hidden_size: entity_embeddings = self.entity_embedding_dense(entity_embeddings) position_embeddings = self.position_embeddings(position_ids.clamp(min=0)) position_embedding_mask = (position_ids != -1).type_as(position_embeddings).unsqueeze(-1) position_embeddings = position_embeddings * position_embedding_mask position_embeddings = torch.sum(position_embeddings, dim=-2) position_embeddings = position_embeddings / position_embedding_mask.sum(dim=-2).clamp(min=1e-7) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = entity_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class LukeSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." 
) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.use_entity_aware_attention = config.use_entity_aware_attention self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) if self.use_entity_aware_attention: self.w2e_query = nn.Linear(config.hidden_size, self.all_head_size) self.e2w_query = nn.Linear(config.hidden_size, self.all_head_size) self.e2e_query = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) if entity_hidden_states is None: concat_hidden_states = word_hidden_states else: concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1) key_layer = self.transpose_for_scores(self.key(concat_hidden_states)) value_layer = self.transpose_for_scores(self.value(concat_hidden_states)) if self.use_entity_aware_attention and entity_hidden_states is not None: # compute query vectors using word-word (w2w), word-entity (w2e), entity-word (e2w), entity-entity (e2e) # query layers w2w_query_layer = self.transpose_for_scores(self.query(word_hidden_states)) w2e_query_layer = self.transpose_for_scores(self.w2e_query(word_hidden_states)) e2w_query_layer = self.transpose_for_scores(self.e2w_query(entity_hidden_states)) e2e_query_layer = self.transpose_for_scores(self.e2e_query(entity_hidden_states)) # compute w2w, w2e, e2w, and e2e key vectors used with the query vectors computed above w2w_key_layer = key_layer[:, :, :word_size, :] e2w_key_layer = key_layer[:, :, :word_size, :] w2e_key_layer = key_layer[:, :, word_size:, :] e2e_key_layer = key_layer[:, :, word_size:, :] # compute attention scores based on the dot product between the query and key vectors w2w_attention_scores = torch.matmul(w2w_query_layer, w2w_key_layer.transpose(-1, -2)) w2e_attention_scores = torch.matmul(w2e_query_layer, w2e_key_layer.transpose(-1, -2)) e2w_attention_scores = torch.matmul(e2w_query_layer, e2w_key_layer.transpose(-1, -2)) e2e_attention_scores = torch.matmul(e2e_query_layer, e2e_key_layer.transpose(-1, -2)) # combine attention scores to create the final attention score matrix word_attention_scores = torch.cat([w2w_attention_scores, w2e_attention_scores], dim=3) entity_attention_scores = torch.cat([e2w_attention_scores, e2e_attention_scores], dim=3) attention_scores = torch.cat([word_attention_scores, entity_attention_scores], dim=2) else: query_layer = self.transpose_for_scores(self.query(concat_hidden_states)) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in LukeModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) output_word_hidden_states = context_layer[:, :word_size, :] if entity_hidden_states is None: output_entity_hidden_states = None else: output_entity_hidden_states = context_layer[:, word_size:, :] if output_attentions: outputs = (output_word_hidden_states, output_entity_hidden_states, attention_probs) else: outputs = (output_word_hidden_states, output_entity_hidden_states) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class LukeSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LukeAttention(nn.Module): def __init__(self, config): super().__init__() self.self = LukeSelfAttention(config) self.output = LukeSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): raise NotImplementedError("LUKE does not support the pruning of attention heads") def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) self_outputs = self.self( word_hidden_states, entity_hidden_states, attention_mask, head_mask, output_attentions, ) if entity_hidden_states is None: concat_self_outputs = self_outputs[0] concat_hidden_states = word_hidden_states else: concat_self_outputs = torch.cat(self_outputs[:2], dim=1) concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1) attention_output = self.output(concat_self_outputs, concat_hidden_states) word_attention_output = attention_output[:, :word_size, :] if entity_hidden_states is None: entity_attention_output = None else: entity_attention_output = attention_output[:, word_size:, :] # add attentions if we output them outputs = (word_attention_output, entity_attention_output) + self_outputs[2:] return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class LukeIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class LukeOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = 
nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LukeLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LukeAttention(config) self.intermediate = LukeIntermediate(config) self.output = LukeOutput(config) def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) self_attention_outputs = self.attention( word_hidden_states, entity_hidden_states, attention_mask, head_mask, output_attentions=output_attentions, ) if entity_hidden_states is None: concat_attention_output = self_attention_outputs[0] else: concat_attention_output = torch.cat(self_attention_outputs[:2], dim=1) outputs = self_attention_outputs[2:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, concat_attention_output ) word_layer_output = layer_output[:, :word_size, :] if entity_hidden_states is None: entity_layer_output = None else: entity_layer_output = layer_output[:, word_size:, :] outputs = (word_layer_output, entity_layer_output) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class LukeEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LukeLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_word_hidden_states = () if output_hidden_states else None all_entity_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_word_hidden_states = all_word_hidden_states + (word_hidden_states,) all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), word_hidden_states, entity_hidden_states, attention_mask, layer_head_mask, ) else: layer_outputs = layer_module( word_hidden_states, entity_hidden_states, attention_mask, layer_head_mask, output_attentions, ) word_hidden_states = layer_outputs[0] if entity_hidden_states is not None: entity_hidden_states = layer_outputs[1] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) if output_hidden_states: all_word_hidden_states = all_word_hidden_states + (word_hidden_states,) all_entity_hidden_states = 
all_entity_hidden_states + (entity_hidden_states,) if not return_dict: return tuple( v for v in [ word_hidden_states, all_word_hidden_states, all_self_attentions, entity_hidden_states, all_entity_hidden_states, ] if v is not None ) return BaseLukeModelOutput( last_hidden_state=word_hidden_states, hidden_states=all_word_hidden_states, attentions=all_self_attentions, entity_last_hidden_state=entity_hidden_states, entity_hidden_states=all_entity_hidden_states, ) # Copied from transformers.models.bert.modeling_bert.BertPooler class LukePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class EntityPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.entity_emb_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.entity_emb_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class EntityPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.config = config self.transform = EntityPredictionHeadTransform(config) self.decoder = nn.Linear(config.entity_emb_size, config.entity_vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.entity_vocab_size)) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class LukePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LukeConfig base_model_prefix = "luke" supports_gradient_checkpointing = True _no_split_modules = ["LukeAttention", "LukeEntityEmbeddings"] def _init_weights(self, module: nn.Module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): if module.embedding_dim == 1: # embedding for bias parameters module.weight.data.zero_() else: module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, LukeEncoder): module.gradient_checkpointing = value LUKE_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LukeConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LUKE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`LukeTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`): Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`LukeTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*): Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`: - 1 for entity tokens that are **not masked**, - 0 for entity tokens that are **masked**. entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *portion A* entity token, - 1 corresponds to a *portion B* entity token. entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*): Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any" " specific head on top.", LUKE_START_DOCSTRING, ) class LukeModel(LukePreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config: LukeConfig, add_pooling_layer: bool = True): super().__init__(config) self.config = config self.embeddings = LukeEmbeddings(config) self.entity_embeddings = LukeEntityEmbeddings(config) self.encoder = LukeEncoder(config) self.pooler = LukePooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def get_entity_embeddings(self): return self.entity_embeddings.entity_embeddings def set_entity_embeddings(self, value): self.entity_embeddings.entity_embeddings = value def _prune_heads(self, heads_to_prune): raise NotImplementedError("LUKE does not support the pruning of attention heads") @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BaseLukeModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseLukeModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import LukeTokenizer, LukeModel >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base") >>> model = LukeModel.from_pretrained("studio-ousia/luke-base") # Compute the contextualized entity representation corresponding to the entity mention "Beyoncé" >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé" >>> encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state # Input Wikipedia entities to obtain enriched contextualized representations of word tokens >>> text = "Beyoncé lives in Los Angeles." >>> entities = [ ... "Beyoncé", ... "Los Angeles", ... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles" >>> entity_spans = [ ... (0, 7), ... (17, 28), ... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> encoding = tokenizer( ... text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt" ... 
) >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if entity_ids is not None: entity_seq_length = entity_ids.size(1) if entity_attention_mask is None: entity_attention_mask = torch.ones((batch_size, entity_seq_length), device=device) if entity_token_type_ids is None: entity_token_type_ids = torch.zeros((batch_size, entity_seq_length), dtype=torch.long, device=device) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) # First, compute word embeddings word_embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) # Second, compute extended attention mask extended_attention_mask = self.get_extended_attention_mask(attention_mask, entity_attention_mask) # Third, compute entity embeddings and concatenate with word embeddings if entity_ids is None: entity_embedding_output = None else: entity_embedding_output = self.entity_embeddings(entity_ids, entity_position_ids, entity_token_type_ids) # Fourth, send embeddings through the model encoder_outputs = self.encoder( word_embedding_output, entity_embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Fifth, get the output. 
LukeModel outputs the same as BertModel, namely sequence_output of shape (batch_size, seq_len, hidden_size) sequence_output = encoder_outputs[0] # Sixth, we compute the pooled_output, word_sequence_output and entity_sequence_output based on the sequence_output pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseLukeModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, entity_last_hidden_state=encoder_outputs.entity_last_hidden_state, entity_hidden_states=encoder_outputs.entity_hidden_states, ) def get_extended_attention_mask( self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor] ): """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: word_attention_mask (`torch.LongTensor`): Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore. entity_attention_mask (`torch.LongTensor`, *optional*): Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore. Returns: `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. """ attention_mask = word_attention_mask if entity_attention_mask is not None: attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1) if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape})") extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min return extended_attention_mask def create_position_ids_from_input_ids(input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. 
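    # Illustrative note (added for clarity, not in the original file), assuming padding_idx=1:
    # for input_ids = [[0, 31414, 232, 2, 1, 1]] the mask computed below is [1, 1, 1, 1, 0, 0],
    # the cumulative sum is [1, 2, 3, 4, 4, 4], and after re-masking and adding padding_idx the
    # resulting position ids are [2, 3, 4, 5, 1, 1]: padding tokens keep position padding_idx
    # while real tokens count up from padding_idx + 1.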
mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask return incremental_indices.long() + padding_idx # Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead class LukeLMHead(nn.Module): """Roberta Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) # For accelerate compatibility and to not break backward compatibility if self.decoder.bias.device.type == "meta": self.decoder.bias = self.bias else: self.bias = self.decoder.bias @add_start_docstrings( """ The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and masked entity prediction. """, LUKE_START_DOCSTRING, ) class LukeForMaskedLM(LukePreTrainedModel): _keys_to_ignore_on_save = [ r"lm_head.decoder.weight", r"lm_head.decoder.bias", r"entity_predictions.decoder.weight", ] _keys_to_ignore_on_load_missing = [ r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias", r"entity_predictions.decoder.weight", ] def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.lm_head = LukeLMHead(config) self.entity_predictions = EntityPredictionHead(config) self.loss_fn = nn.CrossEntropyLoss(ignore_index=-1) # Initialize weights and apply final processing self.post_init() def tie_weights(self): super().tie_weights() self._tie_or_clone_weights(self.entity_predictions.decoder, self.luke.entity_embeddings.entity_embeddings) def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=LukeMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.LongTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, entity_labels: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeMaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) loss = None mlm_loss = None logits = self.lm_head(outputs.last_hidden_state) if labels is not None: mlm_loss = self.loss_fn(logits.view(-1, self.config.vocab_size), labels.view(-1)) if loss is None: loss = mlm_loss mep_loss = None entity_logits = None if outputs.entity_last_hidden_state is not None: entity_logits = self.entity_predictions(outputs.entity_last_hidden_state) if entity_labels is not None: mep_loss = self.loss_fn(entity_logits.view(-1, self.config.entity_vocab_size), entity_labels.view(-1)) if loss is None: loss = mep_loss else: loss = loss + mep_loss if not return_dict: return tuple( v for v in [ loss, mlm_loss, mep_loss, logits, entity_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions, ] if v is not None ) return LukeMaskedLMOutput( loss=loss, mlm_loss=mlm_loss, mep_loss=mep_loss, logits=logits, entity_logits=entity_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity token) for entity classification tasks, such as Open Entity. 
""", LUKE_START_DOCSTRING, ) class LukeForEntityClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntityClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntityClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*): Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import LukeTokenizer, LukeForEntityClassification >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") >>> model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé" >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) Predicted class: person ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) feature_vector = outputs.entity_last_hidden_state[:, 0, :] feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # When the number of dimension of `labels` is 1, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. 
if labels.ndim == 1: loss = nn.functional.cross_entropy(logits, labels) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntityClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity tokens) for entity pair classification tasks, such as TACRED. """, LUKE_START_DOCSTRING, ) class LukeForEntityPairClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntityPairClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntityPairClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*): Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import LukeTokenizer, LukeForEntityPairClassification >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred") >>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred") >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [ ... (0, 7), ... (17, 28), ... 
] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) Predicted class: per:cities_of_residence ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) feature_vector = torch.cat( [outputs.entity_last_hidden_state[:, 0, :], outputs.entity_last_hidden_state[:, 1, :]], dim=1 ) feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # When the number of dimension of `labels` is 1, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. if labels.ndim == 1: loss = nn.functional.cross_entropy(logits, labels) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntityPairClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks such as named entity recognition. """, LUKE_START_DOCSTRING, ) class LukeForEntitySpanClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntitySpanClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask=None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.LongTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, entity_start_positions: Optional[torch.LongTensor] = None, entity_end_positions: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntitySpanClassificationOutput]: r""" entity_start_positions (`torch.LongTensor`): The start positions of entities in the word token sequence. 
entity_end_positions (`torch.LongTensor`): The end positions of entities in the word token sequence. labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*): Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import LukeTokenizer, LukeForEntitySpanClassification >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") >>> model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") >>> text = "Beyoncé lives in Los Angeles" # List all possible entity spans in the text >>> word_start_positions = [0, 8, 14, 17, 21] # character-based start positions of word tokens >>> word_end_positions = [7, 13, 16, 20, 28] # character-based end positions of word tokens >>> entity_spans = [] >>> for i, start_pos in enumerate(word_start_positions): ... for end_pos in word_end_positions[i:]: ... entity_spans.append((start_pos, end_pos)) >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_indices = logits.argmax(-1).squeeze().tolist() >>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices): ... if predicted_class_idx != 0: ... print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx]) Beyoncé PER Los Angeles LOC ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) hidden_size = outputs.last_hidden_state.size(-1) entity_start_positions = entity_start_positions.unsqueeze(-1).expand(-1, -1, hidden_size) if entity_start_positions.device != outputs.last_hidden_state.device: entity_start_positions = entity_start_positions.to(outputs.last_hidden_state.device) start_states = torch.gather(outputs.last_hidden_state, -2, entity_start_positions) entity_end_positions = entity_end_positions.unsqueeze(-1).expand(-1, -1, hidden_size) if entity_end_positions.device != outputs.last_hidden_state.device: entity_end_positions = entity_end_positions.to(outputs.last_hidden_state.device) end_states = torch.gather(outputs.last_hidden_state, -2, entity_end_positions) feature_vector = torch.cat([start_states, end_states, outputs.entity_last_hidden_state], dim=2) feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # When the number of dimension of `labels` is 2, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. 
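            # Shape note (added for clarity, not in the original source): single-label targets
            # have shape (batch_size, entity_length) and are scored with cross entropy; multi-label
            # targets have shape (batch_size, entity_length, num_labels) with 0/1 entries and use
            # binary cross entropy with logits on the flattened logits.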
if labels.ndim == 2: loss = nn.functional.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1)) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntitySpanClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, LUKE_START_DOCSTRING, ) class LukeForSequenceClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) pooled_output = outputs.pooler_output pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return LukeSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a token classification head on top (a linear layer on top of the hidden-states output). To solve Named-Entity Recognition (NER) task using LUKE, `LukeForEntitySpanClassification` is more suitable than this class. 
""", LUKE_START_DOCSTRING, ) class LukeForTokenClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeTokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = outputs.last_hidden_state sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return LukeTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", LUKE_START_DOCSTRING, ) class LukeForQuestionAnswering(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.FloatTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeQuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = outputs.last_hidden_state logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: return tuple( v for v in [ total_loss, start_logits, end_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions, ] if v is not None ) return LukeQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", LUKE_START_DOCSTRING, ) class LukeForMultipleChoice(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeMultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) entity_ids = entity_ids.view(-1, entity_ids.size(-1)) if entity_ids is not None else None entity_attention_mask = ( entity_attention_mask.view(-1, entity_attention_mask.size(-1)) if entity_attention_mask is not None else None ) entity_token_type_ids = ( entity_token_type_ids.view(-1, entity_token_type_ids.size(-1)) if entity_token_type_ids is not None else None ) entity_position_ids = ( entity_position_ids.view(-1, entity_position_ids.size(-2), entity_position_ids.size(-1)) if entity_position_ids is not None else None ) outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) pooled_output = outputs.pooler_output pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, 
num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            return tuple(
                v
                for v in [
                    loss,
                    reshaped_logits,
                    outputs.hidden_states,
                    outputs.entity_hidden_states,
                    outputs.attentions,
                ]
                if v is not None
            )

        return LukeMultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            entity_hidden_states=outputs.entity_hidden_states,
            attentions=outputs.attentions,
        )
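# Usage sketch (illustrative comment, not part of the original file): LukeForMultipleChoice
# expects inputs of shape (batch_size, num_choices, sequence_length); its forward pass flattens
# them to (batch_size * num_choices, sequence_length), scores each choice with a single linear
# head on the pooled output, and reshapes the logits back to (batch_size, num_choices) before
# applying cross entropy against `labels`.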
# coding=utf-8 # Copyright Studio Ousia and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch LUKE model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_luke import LukeConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LukeConfig" _TOKENIZER_FOR_DOC = "LukeTokenizer" _CHECKPOINT_FOR_DOC = "studio-ousia/luke-base" LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = [ "studio-ousia/luke-base", "studio-ousia/luke-large", # See all LUKE models at https://huggingface.co/models?filter=luke ] @dataclass class BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling): """ Base class for outputs of the LUKE model. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length + entity_length, sequence_length + entity_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" entity_last_hidden_state: torch.FloatTensor = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BaseLukeModelOutput(BaseModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ entity_last_hidden_state: torch.FloatTensor = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeMaskedLMOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): The sum of masked language modeling (MLM) loss and entity prediction loss. mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked entity prediction (MEP) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). entity_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None mlm_loss: Optional[torch.FloatTensor] = None mep_loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None entity_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class EntityClassificationOutput(ModelOutput): """ Outputs of entity classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class EntityPairClassificationOutput(ModelOutput): """ Outputs of entity pair classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification scores (before SoftMax). 
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class EntitySpanClassificationOutput(ModelOutput): """ Outputs of entity span classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, entity_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeSequenceClassifierOutput(ModelOutput): """ Outputs of sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. 
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeTokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeQuestionAnsweringModelOutput(ModelOutput): """ Outputs of question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeMultipleChoiceModelOutput(ModelOutput): """ Outputs of multiple choice models. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. 
            Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when
            `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class LukeEmbeddings(nn.Module):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # End copy
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
    ):
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)

        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        if token_type_ids is None:
            # `position_ids` is always defined at this point; this module does not register a `position_ids` buffer
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position
        ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)


class LukeEntityEmbeddings(nn.Module):
    def __init__(self, config: LukeConfig):
        super().__init__()
        self.config = config

        self.entity_embeddings = nn.Embedding(config.entity_vocab_size, config.entity_emb_size, padding_idx=0)
        if config.entity_emb_size != config.hidden_size:
            self.entity_embedding_dense = nn.Linear(config.entity_emb_size, config.hidden_size, bias=False)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(
        self, entity_ids: torch.LongTensor, position_ids: torch.LongTensor, token_type_ids: torch.LongTensor = None
    ):
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(entity_ids)

        entity_embeddings = self.entity_embeddings(entity_ids)
        if self.config.entity_emb_size != self.config.hidden_size:
            entity_embeddings = self.entity_embedding_dense(entity_embeddings)

        position_embeddings = self.position_embeddings(position_ids.clamp(min=0))
        position_embedding_mask = (position_ids != -1).type_as(position_embeddings).unsqueeze(-1)
        position_embeddings = position_embeddings * position_embedding_mask
        position_embeddings = torch.sum(position_embeddings, dim=-2)
        position_embeddings = position_embeddings / position_embedding_mask.sum(dim=-2).clamp(min=1e-7)

        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = entity_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)

        return embeddings


class LukeSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.use_entity_aware_attention = config.use_entity_aware_attention self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) if self.use_entity_aware_attention: self.w2e_query = nn.Linear(config.hidden_size, self.all_head_size) self.e2w_query = nn.Linear(config.hidden_size, self.all_head_size) self.e2e_query = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) if entity_hidden_states is None: concat_hidden_states = word_hidden_states else: concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1) key_layer = self.transpose_for_scores(self.key(concat_hidden_states)) value_layer = self.transpose_for_scores(self.value(concat_hidden_states)) if self.use_entity_aware_attention and entity_hidden_states is not None: # compute query vectors using word-word (w2w), word-entity (w2e), entity-word (e2w), entity-entity (e2e) # query layers w2w_query_layer = self.transpose_for_scores(self.query(word_hidden_states)) w2e_query_layer = self.transpose_for_scores(self.w2e_query(word_hidden_states)) e2w_query_layer = self.transpose_for_scores(self.e2w_query(entity_hidden_states)) e2e_query_layer = self.transpose_for_scores(self.e2e_query(entity_hidden_states)) # compute w2w, w2e, e2w, and e2e key vectors used with the query vectors computed above w2w_key_layer = key_layer[:, :, :word_size, :] e2w_key_layer = key_layer[:, :, :word_size, :] w2e_key_layer = key_layer[:, :, word_size:, :] e2e_key_layer = key_layer[:, :, word_size:, :] # compute attention scores based on the dot product between the query and key vectors w2w_attention_scores = torch.matmul(w2w_query_layer, w2w_key_layer.transpose(-1, -2)) w2e_attention_scores = torch.matmul(w2e_query_layer, w2e_key_layer.transpose(-1, -2)) e2w_attention_scores = torch.matmul(e2w_query_layer, e2w_key_layer.transpose(-1, -2)) e2e_attention_scores = torch.matmul(e2e_query_layer, e2e_key_layer.transpose(-1, -2)) # combine attention scores to create the final attention score matrix word_attention_scores = torch.cat([w2w_attention_scores, w2e_attention_scores], dim=3) entity_attention_scores = torch.cat([e2w_attention_scores, e2e_attention_scores], dim=3) attention_scores = torch.cat([word_attention_scores, entity_attention_scores], dim=2) else: query_layer = self.transpose_for_scores(self.query(concat_hidden_states)) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in LukeModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
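        # With entities present, these scores (and the softmax below) span the concatenated word + entity sequence,
        # i.e. shape (batch_size, num_heads, word_size + entity_length, word_size + entity_length), so word tokens and
        # entities attend to one another; masked positions were pushed to a large negative value above.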
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) output_word_hidden_states = context_layer[:, :word_size, :] if entity_hidden_states is None: output_entity_hidden_states = None else: output_entity_hidden_states = context_layer[:, word_size:, :] if output_attentions: outputs = (output_word_hidden_states, output_entity_hidden_states, attention_probs) else: outputs = (output_word_hidden_states, output_entity_hidden_states) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class LukeSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LukeAttention(nn.Module): def __init__(self, config): super().__init__() self.self = LukeSelfAttention(config) self.output = LukeSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): raise NotImplementedError("LUKE does not support the pruning of attention heads") def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) self_outputs = self.self( word_hidden_states, entity_hidden_states, attention_mask, head_mask, output_attentions, ) if entity_hidden_states is None: concat_self_outputs = self_outputs[0] concat_hidden_states = word_hidden_states else: concat_self_outputs = torch.cat(self_outputs[:2], dim=1) concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1) attention_output = self.output(concat_self_outputs, concat_hidden_states) word_attention_output = attention_output[:, :word_size, :] if entity_hidden_states is None: entity_attention_output = None else: entity_attention_output = attention_output[:, word_size:, :] # add attentions if we output them outputs = (word_attention_output, entity_attention_output) + self_outputs[2:] return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class LukeIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class LukeOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = 
nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LukeLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LukeAttention(config) self.intermediate = LukeIntermediate(config) self.output = LukeOutput(config) def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) self_attention_outputs = self.attention( word_hidden_states, entity_hidden_states, attention_mask, head_mask, output_attentions=output_attentions, ) if entity_hidden_states is None: concat_attention_output = self_attention_outputs[0] else: concat_attention_output = torch.cat(self_attention_outputs[:2], dim=1) outputs = self_attention_outputs[2:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, concat_attention_output ) word_layer_output = layer_output[:, :word_size, :] if entity_hidden_states is None: entity_layer_output = None else: entity_layer_output = layer_output[:, word_size:, :] outputs = (word_layer_output, entity_layer_output) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class LukeEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LukeLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_word_hidden_states = () if output_hidden_states else None all_entity_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_word_hidden_states = all_word_hidden_states + (word_hidden_states,) all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), word_hidden_states, entity_hidden_states, attention_mask, layer_head_mask, ) else: layer_outputs = layer_module( word_hidden_states, entity_hidden_states, attention_mask, layer_head_mask, output_attentions, ) word_hidden_states = layer_outputs[0] if entity_hidden_states is not None: entity_hidden_states = layer_outputs[1] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) if output_hidden_states: all_word_hidden_states = all_word_hidden_states + (word_hidden_states,) all_entity_hidden_states = 
all_entity_hidden_states + (entity_hidden_states,) if not return_dict: return tuple( v for v in [ word_hidden_states, all_word_hidden_states, all_self_attentions, entity_hidden_states, all_entity_hidden_states, ] if v is not None ) return BaseLukeModelOutput( last_hidden_state=word_hidden_states, hidden_states=all_word_hidden_states, attentions=all_self_attentions, entity_last_hidden_state=entity_hidden_states, entity_hidden_states=all_entity_hidden_states, ) # Copied from transformers.models.bert.modeling_bert.BertPooler class LukePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class EntityPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.entity_emb_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.entity_emb_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class EntityPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.config = config self.transform = EntityPredictionHeadTransform(config) self.decoder = nn.Linear(config.entity_emb_size, config.entity_vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.entity_vocab_size)) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class LukePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LukeConfig base_model_prefix = "luke" supports_gradient_checkpointing = True _no_split_modules = ["LukeAttention", "LukeEntityEmbeddings"] def _init_weights(self, module: nn.Module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): if module.embedding_dim == 1: # embedding for bias parameters module.weight.data.zero_() else: module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, LukeEncoder): module.gradient_checkpointing = value LUKE_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LukeConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LUKE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`LukeTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`): Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`LukeTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*): Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`: - 1 for entity tokens that are **not masked**, - 0 for entity tokens that are **masked**. entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *portion A* entity token, - 1 corresponds to a *portion B* entity token. entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*): Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any" " specific head on top.", LUKE_START_DOCSTRING, ) class LukeModel(LukePreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config: LukeConfig, add_pooling_layer: bool = True): super().__init__(config) self.config = config self.embeddings = LukeEmbeddings(config) self.entity_embeddings = LukeEntityEmbeddings(config) self.encoder = LukeEncoder(config) self.pooler = LukePooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def get_entity_embeddings(self): return self.entity_embeddings.entity_embeddings def set_entity_embeddings(self, value): self.entity_embeddings.entity_embeddings = value def _prune_heads(self, heads_to_prune): raise NotImplementedError("LUKE does not support the pruning of attention heads") @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BaseLukeModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseLukeModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import LukeTokenizer, LukeModel >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base") >>> model = LukeModel.from_pretrained("studio-ousia/luke-base") # Compute the contextualized entity representation corresponding to the entity mention "Beyoncé" >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé" >>> encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state # Input Wikipedia entities to obtain enriched contextualized representations of word tokens >>> text = "Beyoncé lives in Los Angeles." >>> entities = [ ... "Beyoncé", ... "Los Angeles", ... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles" >>> entity_spans = [ ... (0, 7), ... (17, 28), ... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> encoding = tokenizer( ... text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt" ... 
) >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if entity_ids is not None: entity_seq_length = entity_ids.size(1) if entity_attention_mask is None: entity_attention_mask = torch.ones((batch_size, entity_seq_length), device=device) if entity_token_type_ids is None: entity_token_type_ids = torch.zeros((batch_size, entity_seq_length), dtype=torch.long, device=device) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) # First, compute word embeddings word_embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) # Second, compute extended attention mask extended_attention_mask = self.get_extended_attention_mask(attention_mask, entity_attention_mask) # Third, compute entity embeddings and concatenate with word embeddings if entity_ids is None: entity_embedding_output = None else: entity_embedding_output = self.entity_embeddings(entity_ids, entity_position_ids, entity_token_type_ids) # Fourth, send embeddings through the model encoder_outputs = self.encoder( word_embedding_output, entity_embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Fifth, get the output. 
        # LukeModel outputs the same as BertModel, namely sequence_output of shape (batch_size, seq_len, hidden_size)
        sequence_output = encoder_outputs[0]

        # Sixth, we compute the pooled_output, word_sequence_output and entity_sequence_output based on the
        # sequence_output
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseLukeModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            entity_last_hidden_state=encoder_outputs.entity_last_hidden_state,
            entity_hidden_states=encoder_outputs.entity_hidden_states,
        )

    def get_extended_attention_mask(
        self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor]
    ):
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            word_attention_mask (`torch.LongTensor`):
                Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore.
            entity_attention_mask (`torch.LongTensor`, *optional*):
                Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore.

        Returns:
            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
        """
        attention_mask = word_attention_mask
        if entity_attention_mask is not None:
            attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1)

        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape})")

        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
        return extended_attention_mask


def create_position_ids_from_input_ids(input_ids, padding_idx):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: torch.Tensor
        padding_idx: int

    Returns: torch.Tensor
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
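    # Illustrative example (values assumed for illustration only): with padding_idx = 1 and input_ids = [[1, 5, 8, 1]],
    # mask = [[0, 1, 1, 0]] and cumsum(mask) * mask = [[0, 1, 2, 0]], so the returned position ids are [[1, 2, 3, 1]]:
    # real tokens count up from padding_idx + 1 while padding positions stay at padding_idx.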
mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask return incremental_indices.long() + padding_idx # Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead class LukeLMHead(nn.Module): """Roberta Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) # For accelerate compatibility and to not break backward compatibility if self.decoder.bias.device.type == "meta": self.decoder.bias = self.bias else: self.bias = self.decoder.bias @add_start_docstrings( """ The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and masked entity prediction. """, LUKE_START_DOCSTRING, ) class LukeForMaskedLM(LukePreTrainedModel): _keys_to_ignore_on_save = [ r"lm_head.decoder.weight", r"lm_head.decoder.bias", r"entity_predictions.decoder.weight", ] _keys_to_ignore_on_load_missing = [ r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias", r"entity_predictions.decoder.weight", ] def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.lm_head = LukeLMHead(config) self.entity_predictions = EntityPredictionHead(config) self.loss_fn = nn.CrossEntropyLoss(ignore_index=-1) # Initialize weights and apply final processing self.post_init() def tie_weights(self): super().tie_weights() self._tie_or_clone_weights(self.entity_predictions.decoder, self.luke.entity_embeddings.entity_embeddings) def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=LukeMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.LongTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, entity_labels: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeMaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
            Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices
            set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ...,
            config.vocab_size]`
        entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Labels for computing the masked entity prediction (MEP) loss. Indices should be in `[-100, 0, ...,
            config.entity_vocab_size]` (see `entity_ids` docstring) Entities with indices set to `-100` are ignored
            (masked), the loss is only computed for the entities with labels in `[0, ..., config.entity_vocab_size]`

        Returns:

        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.luke(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
        )

        loss = None

        mlm_loss = None
        logits = self.lm_head(outputs.last_hidden_state)
        if labels is not None:
            mlm_loss = self.loss_fn(logits.view(-1, self.config.vocab_size), labels.view(-1))
            if loss is None:
                loss = mlm_loss

        mep_loss = None
        entity_logits = None
        if outputs.entity_last_hidden_state is not None:
            entity_logits = self.entity_predictions(outputs.entity_last_hidden_state)
            if entity_labels is not None:
                mep_loss = self.loss_fn(entity_logits.view(-1, self.config.entity_vocab_size), entity_labels.view(-1))
                if loss is None:
                    loss = mep_loss
                else:
                    loss = loss + mep_loss

        if not return_dict:
            return tuple(
                v
                for v in [
                    loss,
                    mlm_loss,
                    mep_loss,
                    logits,
                    entity_logits,
                    outputs.hidden_states,
                    outputs.entity_hidden_states,
                    outputs.attentions,
                ]
                if v is not None
            )

        return LukeMaskedLMOutput(
            loss=loss,
            mlm_loss=mlm_loss,
            mep_loss=mep_loss,
            logits=logits,
            entity_logits=entity_logits,
            hidden_states=outputs.hidden_states,
            entity_hidden_states=outputs.entity_hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity
    token) for entity classification tasks, such as Open Entity.
""", LUKE_START_DOCSTRING, ) class LukeForEntityClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntityClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntityClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*): Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import LukeTokenizer, LukeForEntityClassification >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") >>> model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé" >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) Predicted class: person ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) feature_vector = outputs.entity_last_hidden_state[:, 0, :] feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # When the number of dimension of `labels` is 1, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. 
if labels.ndim == 1: loss = nn.functional.cross_entropy(logits, labels) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntityClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity tokens) for entity pair classification tasks, such as TACRED. """, LUKE_START_DOCSTRING, ) class LukeForEntityPairClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntityPairClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntityPairClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*): Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import LukeTokenizer, LukeForEntityPairClassification >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred") >>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred") >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [ ... (0, 7), ... (17, 28), ... 
] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) Predicted class: per:cities_of_residence ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) feature_vector = torch.cat( [outputs.entity_last_hidden_state[:, 0, :], outputs.entity_last_hidden_state[:, 1, :]], dim=1 ) feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # When the number of dimension of `labels` is 1, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. if labels.ndim == 1: loss = nn.functional.cross_entropy(logits, labels) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntityPairClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks such as named entity recognition. """, LUKE_START_DOCSTRING, ) class LukeForEntitySpanClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntitySpanClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask=None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.LongTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, entity_start_positions: Optional[torch.LongTensor] = None, entity_end_positions: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntitySpanClassificationOutput]: r""" entity_start_positions (`torch.LongTensor`): The start positions of entities in the word token sequence. 
entity_end_positions (`torch.LongTensor`): The end positions of entities in the word token sequence. labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*): Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import LukeTokenizer, LukeForEntitySpanClassification >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") >>> model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") >>> text = "Beyoncé lives in Los Angeles" # List all possible entity spans in the text >>> word_start_positions = [0, 8, 14, 17, 21] # character-based start positions of word tokens >>> word_end_positions = [7, 13, 16, 20, 28] # character-based end positions of word tokens >>> entity_spans = [] >>> for i, start_pos in enumerate(word_start_positions): ... for end_pos in word_end_positions[i:]: ... entity_spans.append((start_pos, end_pos)) >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_indices = logits.argmax(-1).squeeze().tolist() >>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices): ... if predicted_class_idx != 0: ... print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx]) Beyoncé PER Los Angeles LOC ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) hidden_size = outputs.last_hidden_state.size(-1) entity_start_positions = entity_start_positions.unsqueeze(-1).expand(-1, -1, hidden_size) if entity_start_positions.device != outputs.last_hidden_state.device: entity_start_positions = entity_start_positions.to(outputs.last_hidden_state.device) start_states = torch.gather(outputs.last_hidden_state, -2, entity_start_positions) entity_end_positions = entity_end_positions.unsqueeze(-1).expand(-1, -1, hidden_size) if entity_end_positions.device != outputs.last_hidden_state.device: entity_end_positions = entity_end_positions.to(outputs.last_hidden_state.device) end_states = torch.gather(outputs.last_hidden_state, -2, entity_end_positions) feature_vector = torch.cat([start_states, end_states, outputs.entity_last_hidden_state], dim=2) feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # When the number of dimension of `labels` is 2, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. 
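# Shape sketch (illustrative comment, not part of the original file): for single-label NER, `labels` is a
# (batch_size, entity_length) tensor of class indices scored against logits reshaped to
# (batch_size * entity_length, num_labels); for multi-label NER, `labels` is a
# (batch_size, entity_length, num_labels) tensor of 0/1 targets scored elementwise with BCE-with-logits.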
if labels.ndim == 2: loss = nn.functional.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1)) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntitySpanClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, LUKE_START_DOCSTRING, ) class LukeForSequenceClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) pooled_output = outputs.pooler_output pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return LukeSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a token classification head on top (a linear layer on top of the hidden-states output). To solve Named-Entity Recognition (NER) task using LUKE, `LukeForEntitySpanClassification` is more suitable than this class. 
""", LUKE_START_DOCSTRING, ) class LukeForTokenClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeTokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = outputs.last_hidden_state sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return LukeTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", LUKE_START_DOCSTRING, ) class LukeForQuestionAnswering(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.FloatTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeQuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = outputs.last_hidden_state logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: return tuple( v for v in [ total_loss, start_logits, end_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions, ] if v is not None ) return LukeQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", LUKE_START_DOCSTRING, ) class LukeForMultipleChoice(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeMultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) entity_ids = entity_ids.view(-1, entity_ids.size(-1)) if entity_ids is not None else None entity_attention_mask = ( entity_attention_mask.view(-1, entity_attention_mask.size(-1)) if entity_attention_mask is not None else None ) entity_token_type_ids = ( entity_token_type_ids.view(-1, entity_token_type_ids.size(-1)) if entity_token_type_ids is not None else None ) entity_position_ids = ( entity_position_ids.view(-1, entity_position_ids.size(-2), entity_position_ids.size(-1)) if entity_position_ids is not None else None ) outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) pooled_output = outputs.pooler_output pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, 
num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: return tuple( v for v in [ loss, reshaped_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions, ] if v is not None ) return LukeMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, )
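Unlike the entity-level heads above, the sequence-classification, token-classification, question-answering and multiple-choice heads rely on `add_code_sample_docstrings` and carry no inline usage example. Below is a minimal sketch for the sequence-classification head; the `studio-ousia/luke-base` checkpoint and the two-label setup are illustrative assumptions, and the classification layer is randomly initialized until fine-tuned, so the printed prediction is arbitrary.

```python
# Minimal sketch (assumed checkpoint and label count): run LukeForSequenceClassification
# on plain text, without entity inputs.
import torch

from transformers import LukeForSequenceClassification, LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
model = LukeForSequenceClassification.from_pretrained("studio-ousia/luke-base", num_labels=2)

inputs = tokenizer("Beyoncé lives in Los Angeles.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

predicted_class_idx = outputs.logits.argmax(-1).item()  # index into model.config.id2label
print(predicted_class_idx)
```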
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/cpm/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available _import_structure = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_cpm"] = ["CpmTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_cpm_fast"] = ["CpmTokenizerFast"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_cpm import CpmTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_cpm_fast import CpmTokenizerFast else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available _import_structure = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_cpm"] = ["CpmTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_cpm_fast"] = ["CpmTokenizerFast"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_cpm import CpmTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_cpm_fast import CpmTokenizerFast else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
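The CPM `__init__.py` above (identical before and after the commit) illustrates the library's lazy-import pattern: a tokenizer class is only registered in `_import_structure` when its optional backend is installed, and `_LazyModule` defers the actual import until the attribute is first accessed. A short sketch of how a caller can mirror those guards is below; it assumes only that `transformers` itself is installed.

```python
# Sketch: import the CPM tokenizers only when their optional backends are available,
# mirroring the guards in the __init__.py above.
from transformers.utils import is_sentencepiece_available, is_tokenizers_available

if is_sentencepiece_available():
    from transformers.models.cpm import CpmTokenizer  # sentencepiece-backed (slow) tokenizer

if is_tokenizers_available():
    from transformers.models.cpm import CpmTokenizerFast  # tokenizers-backed (fast) tokenizer
```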
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/rag/retrieval_rag.py
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """RAG Retriever model implementation.""" import os import pickle import time from typing import Iterable, List, Optional, Tuple import numpy as np from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends from .configuration_rag import RagConfig from .tokenization_rag import RagTokenizer if is_datasets_available(): from datasets import Dataset, load_dataset, load_from_disk if is_faiss_available(): import faiss logger = logging.get_logger(__name__) LEGACY_INDEX_PATH = "https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr/" class Index: """ A base class for the Indices encapsulated by the [`RagRetriever`]. """ def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]: """ Returns a list of dictionaries, containing titles and text of the retrieved documents. Args: doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`): A tensor of document indices. """ raise NotImplementedError def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: """ For each query in the batch, retrieves `n_docs` documents. Args: question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`): An array of query vectors. n_docs (`int`): The number of docs retrieved per query. Returns: `np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents. `np.ndarray` of shape `(batch_size, vector_size)`: A tensor of vector representations of retrieved documents. """ raise NotImplementedError def is_initialized(self): """ Returns `True` if index is already initialized. """ raise NotImplementedError def init_index(self): """ A function responsible for loading the index into memory. Should be called only once per training run of a RAG model. E.g. if the model is trained on multiple GPUs in a distributed setup, only one of the workers will load the index. """ raise NotImplementedError class LegacyIndex(Index): """ An index which can be deserialized from the files built using https://github.com/facebookresearch/DPR. We use default faiss index parameters as specified in that repository. Args: vector_size (`int`): The dimension of indexed vectors. 
index_path (`str`): A path to a *directory* containing index files compatible with [`~models.rag.retrieval_rag.LegacyIndex`] """ INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index" PASSAGE_FILENAME = "psgs_w100.tsv.pkl" def __init__(self, vector_size, index_path): self.index_id_to_db_id = [] self.index_path = index_path self.passages = self._load_passages() self.vector_size = vector_size self.index = None self._index_initialized = False def _resolve_path(self, index_path, filename): is_local = os.path.isdir(index_path) try: # Load from URL or cache if already cached resolved_archive_file = cached_file(index_path, filename) except EnvironmentError: msg = ( f"Can't load '{filename}'. Make sure that:\n\n" f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n" f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n" ) raise EnvironmentError(msg) if is_local: logger.info(f"loading file {resolved_archive_file}") else: logger.info(f"loading file {filename} from cache at {resolved_archive_file}") return resolved_archive_file def _load_passages(self): logger.info(f"Loading passages from {self.index_path}") passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME) with open(passages_path, "rb") as passages_file: passages = pickle.load(passages_file) return passages def _deserialize_index(self): logger.info(f"Loading index from {self.index_path}") resolved_index_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index.dpr") self.index = faiss.read_index(resolved_index_path) resolved_meta_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index_meta.dpr") with open(resolved_meta_path, "rb") as metadata_file: self.index_id_to_db_id = pickle.load(metadata_file) assert ( len(self.index_id_to_db_id) == self.index.ntotal ), "Deserialized index_id_to_db_id should match faiss index size" def is_initialized(self): return self._index_initialized def init_index(self): index = faiss.IndexHNSWFlat(self.vector_size + 1, 512) index.hnsw.efSearch = 128 index.hnsw.efConstruction = 200 self.index = index self._deserialize_index() self._index_initialized = True def get_doc_dicts(self, doc_ids: np.array): doc_list = [] for doc_ids_i in doc_ids: ids = [str(int(doc_id)) for doc_id in doc_ids_i] docs = [self.passages[doc_id] for doc_id in ids] doc_list.append(docs) doc_dicts = [] for docs in doc_list: doc_dict = {} doc_dict["title"] = [doc[1] for doc in docs] doc_dict["text"] = [doc[0] for doc in docs] doc_dicts.append(doc_dict) return doc_dicts def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: aux_dim = np.zeros(len(question_hidden_states), dtype="float32").reshape(-1, 1) query_nhsw_vectors = np.hstack((question_hidden_states, aux_dim)) _, docs_ids = self.index.search(query_nhsw_vectors, n_docs) vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids] ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids] return np.array(ids), np.array(vectors) class HFIndexBase(Index): def __init__(self, vector_size, dataset, index_initialized=False): self.vector_size = vector_size self.dataset = dataset self._index_initialized = index_initialized self._check_dataset_format(with_index=index_initialized) dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True, dtype="float32") def _check_dataset_format(self, with_index: bool): if not 
isinstance(self.dataset, Dataset): raise ValueError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}") if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0: raise ValueError( "Dataset should be a dataset with the following columns: " "title (str), text (str) and embeddings (arrays of dimension vector_size), " f"but got columns {self.dataset.column_names}" ) if with_index and "embeddings" not in self.dataset.list_indexes(): raise ValueError( "Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it " "or `dataset.load_faiss_index` to load one from the disk." ) def init_index(self): raise NotImplementedError() def is_initialized(self): return self._index_initialized def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]: return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])] def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: _, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs) docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids] vectors = [doc["embeddings"] for doc in docs] for i in range(len(vectors)): if len(vectors[i]) < n_docs: vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))]) return np.array(ids), np.array(vectors) # shapes (batch_size, n_docs) and (batch_size, n_docs, d) class CanonicalHFIndex(HFIndexBase): """ A wrapper around an instance of [`~datasets.Datasets`]. If `index_path` is set to `None`, we load the pre-computed index available with the [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path on disk. Args: vector_size (`int`): the dimension of the passages embeddings used by the index dataset_name (`str`, optional, defaults to `wiki_dpr`): A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids with `datasets.list_datasets()`). dataset_split (`str`, optional, defaults to `train`) Which split of the `dataset` to load. index_name (`str`, optional, defaults to `train`) The index_name of the index associated with the `dataset`. The index loaded from `index_path` will be saved under this name. index_path (`str`, optional, defaults to `None`) The path to the serialized faiss index on disk. use_dummy_dataset (`bool`, optional, defaults to `False`): If True, use the dummy configuration of the dataset for tests. 
""" def __init__( self, vector_size: int, dataset_name: str = "wiki_dpr", dataset_split: str = "train", index_name: Optional[str] = None, index_path: Optional[str] = None, use_dummy_dataset=False, ): if int(index_path is None) + int(index_name is None) != 1: raise ValueError("Please provide `index_name` or `index_path`.") self.dataset_name = dataset_name self.dataset_split = dataset_split self.index_name = index_name self.index_path = index_path self.use_dummy_dataset = use_dummy_dataset logger.info(f"Loading passages from {self.dataset_name}") dataset = load_dataset( self.dataset_name, with_index=False, split=self.dataset_split, dummy=self.use_dummy_dataset ) super().__init__(vector_size, dataset, index_initialized=False) def init_index(self): if self.index_path is not None: logger.info(f"Loading index from {self.index_path}") self.dataset.load_faiss_index("embeddings", file=self.index_path) else: logger.info(f"Loading index from {self.dataset_name} with index name {self.index_name}") self.dataset = load_dataset( self.dataset_name, with_embeddings=True, with_index=True, split=self.dataset_split, index_name=self.index_name, dummy=self.use_dummy_dataset, ) self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True) self._index_initialized = True class CustomHFIndex(HFIndexBase): """ A wrapper around an instance of [`~datasets.Datasets`]. The dataset and the index are both loaded from the indicated paths on disk. Args: vector_size (`int`): the dimension of the passages embeddings used by the index dataset_path (`str`): The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and embeddings (arrays of dimension vector_size) index_path (`str`) The path to the serialized faiss index on disk. """ def __init__(self, vector_size: int, dataset, index_path=None): super().__init__(vector_size, dataset, index_initialized=index_path is None) self.index_path = index_path @classmethod def load_from_disk(cls, vector_size, dataset_path, index_path): logger.info(f"Loading passages from {dataset_path}") if dataset_path is None or index_path is None: raise ValueError( "Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` " "and `dataset.get_index('embeddings').save(index_path)`." ) dataset = load_from_disk(dataset_path) return cls(vector_size=vector_size, dataset=dataset, index_path=index_path) def init_index(self): if not self.is_initialized(): logger.info(f"Loading index from {self.index_path}") self.dataset.load_faiss_index("embeddings", file=self.index_path) self._index_initialized = True class RagRetriever: """ Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents contents, and it formats them to be used with a RagModel. Args: config ([`RagConfig`]): The configuration of the RAG model this Retriever is used with. Contains parameters indicating which `Index` to build. You can load your own custom dataset with `config.index_name="custom"` or use a canonical one (default) from the datasets library with `config.index_name="wiki_dpr"` for example. question_encoder_tokenizer ([`PreTrainedTokenizer`]): The tokenizer that was used to tokenize the question. It is used to decode the question and then use the generator_tokenizer. generator_tokenizer ([`PreTrainedTokenizer`]): The tokenizer used for the generator part of the RagModel. 
index ([`~models.rag.retrieval_rag.Index`], optional, defaults to the one defined by the configuration): If specified, use this index instead of the one built using the configuration Examples: ```python >>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact') >>> from transformers import RagRetriever >>> retriever = RagRetriever.from_pretrained( ... "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed" ... ) >>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py >>> from transformers import RagRetriever >>> dataset = ( ... ... ... ) # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset) >>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py >>> from transformers import RagRetriever >>> dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)* >>> index_path = "path/to/my/index.faiss" # faiss index saved via *dataset.get_index("embeddings").save(...)* >>> retriever = RagRetriever.from_pretrained( ... "facebook/dpr-ctx_encoder-single-nq-base", ... index_name="custom", ... passages_path=dataset_path, ... index_path=index_path, ... ) >>> # To load the legacy index built originally for Rag's paper >>> from transformers import RagRetriever >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy") ```""" def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True): self._init_retrieval = init_retrieval requires_backends(self, ["datasets", "faiss"]) super().__init__() self.index = index or self._build_index(config) self.generator_tokenizer = generator_tokenizer self.question_encoder_tokenizer = question_encoder_tokenizer self.n_docs = config.n_docs self.batch_size = config.retrieval_batch_size self.config = config if self._init_retrieval: self.init_retrieval() self.ctx_encoder_tokenizer = None self.return_tokenized_docs = False @staticmethod def _build_index(config): if config.index_name == "legacy": return LegacyIndex( config.retrieval_vector_size, config.index_path or LEGACY_INDEX_PATH, ) elif config.index_name == "custom": return CustomHFIndex.load_from_disk( vector_size=config.retrieval_vector_size, dataset_path=config.passages_path, index_path=config.index_path, ) else: return CanonicalHFIndex( vector_size=config.retrieval_vector_size, dataset_name=config.dataset, dataset_split=config.dataset_split, index_name=config.index_name, index_path=config.index_path, use_dummy_dataset=config.use_dummy_dataset, ) @classmethod def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs): requires_backends(cls, ["datasets", "faiss"]) config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs) rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config) question_encoder_tokenizer = rag_tokenizer.question_encoder generator_tokenizer = rag_tokenizer.generator if indexed_dataset is not None: config.index_name = "custom" index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset) else: index = cls._build_index(config) return 
cls( config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, ) def save_pretrained(self, save_directory): if isinstance(self.index, CustomHFIndex): if self.config.index_path is None: index_path = os.path.join(save_directory, "hf_dataset_index.faiss") self.index.dataset.get_index("embeddings").save(index_path) self.config.index_path = index_path if self.config.passages_path is None: passages_path = os.path.join(save_directory, "hf_dataset") # datasets don't support save_to_disk with indexes right now faiss_index = self.index.dataset._indexes.pop("embeddings") self.index.dataset.save_to_disk(passages_path) self.index.dataset._indexes["embeddings"] = faiss_index self.config.passages_path = passages_path self.config.save_pretrained(save_directory) rag_tokenizer = RagTokenizer( question_encoder=self.question_encoder_tokenizer, generator=self.generator_tokenizer, ) rag_tokenizer.save_pretrained(save_directory) def init_retrieval(self): """ Retriever initialization function. It loads the index into memory. """ logger.info("initializing retrieval") self.index.init_index() def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None): r""" Postprocessing retrieved `docs` and combining them with `input_strings`. Args: docs (`dict`): Retrieved documents. input_strings (`str`): Input strings decoded by `preprocess_query`. prefix (`str`): Prefix added at the beginning of each input, typically used with T5-based models. Return: `tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible `attention_mask`. """ def cat_input_and_doc(doc_title, doc_text, input_string, prefix): # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation # TODO(piktus): better handling of truncation if doc_title.startswith('"'): doc_title = doc_title[1:] if doc_title.endswith('"'): doc_title = doc_title[:-1] if prefix is None: prefix = "" out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace( " ", " " ) return out rag_input_strings = [ cat_input_and_doc( docs[i]["title"][j], docs[i]["text"][j], input_strings[i], prefix, ) for i in range(len(docs)) for j in range(n_docs) ] contextualized_inputs = self.generator_tokenizer.batch_encode_plus( rag_input_strings, max_length=self.config.max_combined_length, return_tensors=return_tensors, padding="max_length", truncation=True, ) return contextualized_inputs["input_ids"], contextualized_inputs["attention_mask"] def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]: return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)] def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray]: question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size) ids_batched = [] vectors_batched = [] for question_hidden_states in question_hidden_states_batched: start_time = time.time() ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs) logger.debug( f"index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}" ) ids_batched.extend(ids) vectors_batched.extend(vectors) return ( np.array(ids_batched), np.array(vectors_batched), ) # shapes (batch_size, n_docs) and (batch_size, n_docs, d) def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]: """ Retrieves documents for 
specified `question_hidden_states`. Args: question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`): A batch of query vectors to retrieve with. n_docs (`int`): The number of docs retrieved per query. Return: `Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects: - **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings of the retrieved docs per query. - **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index - **doc_dicts** (`List[dict]`): The `retrieved_doc_embeds` examples per query. """ doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids) def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer): # used in end2end retriever training self.ctx_encoder_tokenizer = ctx_encoder_tokenizer self.return_tokenized_docs = True def __call__( self, question_input_ids: List[List[int]], question_hidden_states: np.ndarray, prefix=None, n_docs=None, return_tensors=None, ) -> BatchEncoding: """ Retrieves documents for specified `question_hidden_states`. Args: question_input_ids: (`List[List[int]]`) batch of input ids question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`: A batch of query vectors to retrieve with. prefix: (`str`, *optional*): The prefix used by the generator's tokenizer. n_docs (`int`, *optional*): The number of docs retrieved per query. return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **context_input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). 
[What are attention masks?](../glossary#attention-mask) - **retrieved_doc_embeds** -- List of embeddings of the retrieved documents - **doc_ids** -- List of ids of the retrieved documents """ n_docs = n_docs if n_docs is not None else self.n_docs prefix = prefix if prefix is not None else self.config.generator.prefix retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs) input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True) context_input_ids, context_attention_mask = self.postprocess_docs( docs, input_strings, prefix, n_docs, return_tensors=return_tensors ) if self.return_tokenized_docs: retrieved_doc_text = [] retrieved_doc_title = [] for b_idx in range(len(docs)): for doc_idx in range(n_docs): retrieved_doc_text.append(docs[b_idx]["text"][doc_idx]) retrieved_doc_title.append(docs[b_idx]["title"][doc_idx]) tokenized_docs = self.ctx_encoder_tokenizer( retrieved_doc_title, retrieved_doc_text, truncation=True, padding="longest", return_tensors=return_tensors, ) return BatchEncoding( { "context_input_ids": context_input_ids, "context_attention_mask": context_attention_mask, "retrieved_doc_embeds": retrieved_doc_embeds, "doc_ids": doc_ids, "tokenized_doc_ids": tokenized_docs["input_ids"], "tokenized_doc_attention_mask": tokenized_docs["attention_mask"], }, tensor_type=return_tensors, ) else: return BatchEncoding( { "context_input_ids": context_input_ids, "context_attention_mask": context_attention_mask, "retrieved_doc_embeds": retrieved_doc_embeds, "doc_ids": doc_ids, }, tensor_type=return_tensors, )
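To tie the pieces of `RagRetriever` together, here is a minimal sketch of the retrieval flow described above: encode the question, call the retriever, and inspect the contextualized inputs it builds for the generator. The `facebook/rag-token-base` checkpoint and the dummy `wiki_dpr` index are assumptions chosen to keep the example small, and the `datasets` and `faiss` backends must be installed.

```python
# Sketch (assumed checkpoint, dummy index): retrieve passages for an encoded question.
from transformers import RagModel, RagRetriever, RagTokenizer

retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
)
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever)

inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
question_hidden_states = model.question_encoder(inputs["input_ids"])[0]

docs_dict = retriever(
    inputs["input_ids"].numpy(),
    question_hidden_states.detach().numpy(),
    return_tensors="pt",
)
print(docs_dict["doc_ids"].shape)            # (batch_size, n_docs)
print(docs_dict["context_input_ids"].shape)  # question + retrieved passages for the generator
```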
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """RAG Retriever model implementation.""" import os import pickle import time from typing import Iterable, List, Optional, Tuple import numpy as np from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends from .configuration_rag import RagConfig from .tokenization_rag import RagTokenizer if is_datasets_available(): from datasets import Dataset, load_dataset, load_from_disk if is_faiss_available(): import faiss logger = logging.get_logger(__name__) LEGACY_INDEX_PATH = "https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr/" class Index: """ A base class for the Indices encapsulated by the [`RagRetriever`]. """ def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]: """ Returns a list of dictionaries, containing titles and text of the retrieved documents. Args: doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`): A tensor of document indices. """ raise NotImplementedError def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: """ For each query in the batch, retrieves `n_docs` documents. Args: question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`): An array of query vectors. n_docs (`int`): The number of docs retrieved per query. Returns: `np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents. `np.ndarray` of shape `(batch_size, vector_size)`: A tensor of vector representations of retrieved documents. """ raise NotImplementedError def is_initialized(self): """ Returns `True` if index is already initialized. """ raise NotImplementedError def init_index(self): """ A function responsible for loading the index into memory. Should be called only once per training run of a RAG model. E.g. if the model is trained on multiple GPUs in a distributed setup, only one of the workers will load the index. """ raise NotImplementedError class LegacyIndex(Index): """ An index which can be deserialized from the files built using https://github.com/facebookresearch/DPR. We use default faiss index parameters as specified in that repository. Args: vector_size (`int`): The dimension of indexed vectors. 
index_path (`str`): A path to a *directory* containing index files compatible with [`~models.rag.retrieval_rag.LegacyIndex`] """ INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index" PASSAGE_FILENAME = "psgs_w100.tsv.pkl" def __init__(self, vector_size, index_path): self.index_id_to_db_id = [] self.index_path = index_path self.passages = self._load_passages() self.vector_size = vector_size self.index = None self._index_initialized = False def _resolve_path(self, index_path, filename): is_local = os.path.isdir(index_path) try: # Load from URL or cache if already cached resolved_archive_file = cached_file(index_path, filename) except EnvironmentError: msg = ( f"Can't load '{filename}'. Make sure that:\n\n" f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n" f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n" ) raise EnvironmentError(msg) if is_local: logger.info(f"loading file {resolved_archive_file}") else: logger.info(f"loading file {filename} from cache at {resolved_archive_file}") return resolved_archive_file def _load_passages(self): logger.info(f"Loading passages from {self.index_path}") passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME) with open(passages_path, "rb") as passages_file: passages = pickle.load(passages_file) return passages def _deserialize_index(self): logger.info(f"Loading index from {self.index_path}") resolved_index_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index.dpr") self.index = faiss.read_index(resolved_index_path) resolved_meta_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index_meta.dpr") with open(resolved_meta_path, "rb") as metadata_file: self.index_id_to_db_id = pickle.load(metadata_file) assert ( len(self.index_id_to_db_id) == self.index.ntotal ), "Deserialized index_id_to_db_id should match faiss index size" def is_initialized(self): return self._index_initialized def init_index(self): index = faiss.IndexHNSWFlat(self.vector_size + 1, 512) index.hnsw.efSearch = 128 index.hnsw.efConstruction = 200 self.index = index self._deserialize_index() self._index_initialized = True def get_doc_dicts(self, doc_ids: np.array): doc_list = [] for doc_ids_i in doc_ids: ids = [str(int(doc_id)) for doc_id in doc_ids_i] docs = [self.passages[doc_id] for doc_id in ids] doc_list.append(docs) doc_dicts = [] for docs in doc_list: doc_dict = {} doc_dict["title"] = [doc[1] for doc in docs] doc_dict["text"] = [doc[0] for doc in docs] doc_dicts.append(doc_dict) return doc_dicts def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: aux_dim = np.zeros(len(question_hidden_states), dtype="float32").reshape(-1, 1) query_nhsw_vectors = np.hstack((question_hidden_states, aux_dim)) _, docs_ids = self.index.search(query_nhsw_vectors, n_docs) vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids] ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids] return np.array(ids), np.array(vectors) class HFIndexBase(Index): def __init__(self, vector_size, dataset, index_initialized=False): self.vector_size = vector_size self.dataset = dataset self._index_initialized = index_initialized self._check_dataset_format(with_index=index_initialized) dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True, dtype="float32") def _check_dataset_format(self, with_index: bool): if not 
isinstance(self.dataset, Dataset): raise ValueError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}") if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0: raise ValueError( "Dataset should be a dataset with the following columns: " "title (str), text (str) and embeddings (arrays of dimension vector_size), " f"but got columns {self.dataset.column_names}" ) if with_index and "embeddings" not in self.dataset.list_indexes(): raise ValueError( "Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it " "or `dataset.load_faiss_index` to load one from the disk." ) def init_index(self): raise NotImplementedError() def is_initialized(self): return self._index_initialized def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]: return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])] def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: _, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs) docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids] vectors = [doc["embeddings"] for doc in docs] for i in range(len(vectors)): if len(vectors[i]) < n_docs: vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))]) return np.array(ids), np.array(vectors) # shapes (batch_size, n_docs) and (batch_size, n_docs, d) class CanonicalHFIndex(HFIndexBase): """ A wrapper around an instance of [`~datasets.Datasets`]. If `index_path` is set to `None`, we load the pre-computed index available with the [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path on disk. Args: vector_size (`int`): the dimension of the passages embeddings used by the index dataset_name (`str`, optional, defaults to `wiki_dpr`): A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids with `datasets.list_datasets()`). dataset_split (`str`, optional, defaults to `train`) Which split of the `dataset` to load. index_name (`str`, optional, defaults to `train`) The index_name of the index associated with the `dataset`. The index loaded from `index_path` will be saved under this name. index_path (`str`, optional, defaults to `None`) The path to the serialized faiss index on disk. use_dummy_dataset (`bool`, optional, defaults to `False`): If True, use the dummy configuration of the dataset for tests. 
""" def __init__( self, vector_size: int, dataset_name: str = "wiki_dpr", dataset_split: str = "train", index_name: Optional[str] = None, index_path: Optional[str] = None, use_dummy_dataset=False, ): if int(index_path is None) + int(index_name is None) != 1: raise ValueError("Please provide `index_name` or `index_path`.") self.dataset_name = dataset_name self.dataset_split = dataset_split self.index_name = index_name self.index_path = index_path self.use_dummy_dataset = use_dummy_dataset logger.info(f"Loading passages from {self.dataset_name}") dataset = load_dataset( self.dataset_name, with_index=False, split=self.dataset_split, dummy=self.use_dummy_dataset ) super().__init__(vector_size, dataset, index_initialized=False) def init_index(self): if self.index_path is not None: logger.info(f"Loading index from {self.index_path}") self.dataset.load_faiss_index("embeddings", file=self.index_path) else: logger.info(f"Loading index from {self.dataset_name} with index name {self.index_name}") self.dataset = load_dataset( self.dataset_name, with_embeddings=True, with_index=True, split=self.dataset_split, index_name=self.index_name, dummy=self.use_dummy_dataset, ) self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True) self._index_initialized = True class CustomHFIndex(HFIndexBase): """ A wrapper around an instance of [`~datasets.Datasets`]. The dataset and the index are both loaded from the indicated paths on disk. Args: vector_size (`int`): the dimension of the passages embeddings used by the index dataset_path (`str`): The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and embeddings (arrays of dimension vector_size) index_path (`str`) The path to the serialized faiss index on disk. """ def __init__(self, vector_size: int, dataset, index_path=None): super().__init__(vector_size, dataset, index_initialized=index_path is None) self.index_path = index_path @classmethod def load_from_disk(cls, vector_size, dataset_path, index_path): logger.info(f"Loading passages from {dataset_path}") if dataset_path is None or index_path is None: raise ValueError( "Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` " "and `dataset.get_index('embeddings').save(index_path)`." ) dataset = load_from_disk(dataset_path) return cls(vector_size=vector_size, dataset=dataset, index_path=index_path) def init_index(self): if not self.is_initialized(): logger.info(f"Loading index from {self.index_path}") self.dataset.load_faiss_index("embeddings", file=self.index_path) self._index_initialized = True class RagRetriever: """ Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents contents, and it formats them to be used with a RagModel. Args: config ([`RagConfig`]): The configuration of the RAG model this Retriever is used with. Contains parameters indicating which `Index` to build. You can load your own custom dataset with `config.index_name="custom"` or use a canonical one (default) from the datasets library with `config.index_name="wiki_dpr"` for example. question_encoder_tokenizer ([`PreTrainedTokenizer`]): The tokenizer that was used to tokenize the question. It is used to decode the question and then use the generator_tokenizer. generator_tokenizer ([`PreTrainedTokenizer`]): The tokenizer used for the generator part of the RagModel. 
index ([`~models.rag.retrieval_rag.Index`], optional, defaults to the one defined by the configuration): If specified, use this index instead of the one built using the configuration Examples: ```python >>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact') >>> from transformers import RagRetriever >>> retriever = RagRetriever.from_pretrained( ... "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed" ... ) >>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py >>> from transformers import RagRetriever >>> dataset = ( ... ... ... ) # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset) >>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py >>> from transformers import RagRetriever >>> dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)* >>> index_path = "path/to/my/index.faiss" # faiss index saved via *dataset.get_index("embeddings").save(...)* >>> retriever = RagRetriever.from_pretrained( ... "facebook/dpr-ctx_encoder-single-nq-base", ... index_name="custom", ... passages_path=dataset_path, ... index_path=index_path, ... ) >>> # To load the legacy index built originally for Rag's paper >>> from transformers import RagRetriever >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy") ```""" def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True): self._init_retrieval = init_retrieval requires_backends(self, ["datasets", "faiss"]) super().__init__() self.index = index or self._build_index(config) self.generator_tokenizer = generator_tokenizer self.question_encoder_tokenizer = question_encoder_tokenizer self.n_docs = config.n_docs self.batch_size = config.retrieval_batch_size self.config = config if self._init_retrieval: self.init_retrieval() self.ctx_encoder_tokenizer = None self.return_tokenized_docs = False @staticmethod def _build_index(config): if config.index_name == "legacy": return LegacyIndex( config.retrieval_vector_size, config.index_path or LEGACY_INDEX_PATH, ) elif config.index_name == "custom": return CustomHFIndex.load_from_disk( vector_size=config.retrieval_vector_size, dataset_path=config.passages_path, index_path=config.index_path, ) else: return CanonicalHFIndex( vector_size=config.retrieval_vector_size, dataset_name=config.dataset, dataset_split=config.dataset_split, index_name=config.index_name, index_path=config.index_path, use_dummy_dataset=config.use_dummy_dataset, ) @classmethod def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs): requires_backends(cls, ["datasets", "faiss"]) config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs) rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config) question_encoder_tokenizer = rag_tokenizer.question_encoder generator_tokenizer = rag_tokenizer.generator if indexed_dataset is not None: config.index_name = "custom" index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset) else: index = cls._build_index(config) return 
cls( config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, ) def save_pretrained(self, save_directory): if isinstance(self.index, CustomHFIndex): if self.config.index_path is None: index_path = os.path.join(save_directory, "hf_dataset_index.faiss") self.index.dataset.get_index("embeddings").save(index_path) self.config.index_path = index_path if self.config.passages_path is None: passages_path = os.path.join(save_directory, "hf_dataset") # datasets don't support save_to_disk with indexes right now faiss_index = self.index.dataset._indexes.pop("embeddings") self.index.dataset.save_to_disk(passages_path) self.index.dataset._indexes["embeddings"] = faiss_index self.config.passages_path = passages_path self.config.save_pretrained(save_directory) rag_tokenizer = RagTokenizer( question_encoder=self.question_encoder_tokenizer, generator=self.generator_tokenizer, ) rag_tokenizer.save_pretrained(save_directory) def init_retrieval(self): """ Retriever initialization function. It loads the index into memory. """ logger.info("initializing retrieval") self.index.init_index() def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None): r""" Postprocessing retrieved `docs` and combining them with `input_strings`. Args: docs (`dict`): Retrieved documents. input_strings (`str`): Input strings decoded by `preprocess_query`. prefix (`str`): Prefix added at the beginning of each input, typically used with T5-based models. Return: `tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible `attention_mask`. """ def cat_input_and_doc(doc_title, doc_text, input_string, prefix): # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation # TODO(piktus): better handling of truncation if doc_title.startswith('"'): doc_title = doc_title[1:] if doc_title.endswith('"'): doc_title = doc_title[:-1] if prefix is None: prefix = "" out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace( " ", " " ) return out rag_input_strings = [ cat_input_and_doc( docs[i]["title"][j], docs[i]["text"][j], input_strings[i], prefix, ) for i in range(len(docs)) for j in range(n_docs) ] contextualized_inputs = self.generator_tokenizer.batch_encode_plus( rag_input_strings, max_length=self.config.max_combined_length, return_tensors=return_tensors, padding="max_length", truncation=True, ) return contextualized_inputs["input_ids"], contextualized_inputs["attention_mask"] def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]: return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)] def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray]: question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size) ids_batched = [] vectors_batched = [] for question_hidden_states in question_hidden_states_batched: start_time = time.time() ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs) logger.debug( f"index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}" ) ids_batched.extend(ids) vectors_batched.extend(vectors) return ( np.array(ids_batched), np.array(vectors_batched), ) # shapes (batch_size, n_docs) and (batch_size, n_docs, d) def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]: """ Retrieves documents for 
specified `question_hidden_states`. Args: question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`): A batch of query vectors to retrieve with. n_docs (`int`): The number of docs retrieved per query. Return: `Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects: - **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings of the retrieved docs per query. - **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index - **doc_dicts** (`List[dict]`): The `retrieved_doc_embeds` examples per query. """ doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids) def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer): # used in end2end retriever training self.ctx_encoder_tokenizer = ctx_encoder_tokenizer self.return_tokenized_docs = True def __call__( self, question_input_ids: List[List[int]], question_hidden_states: np.ndarray, prefix=None, n_docs=None, return_tensors=None, ) -> BatchEncoding: """ Retrieves documents for specified `question_hidden_states`. Args: question_input_ids: (`List[List[int]]`) batch of input ids question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`: A batch of query vectors to retrieve with. prefix: (`str`, *optional*): The prefix used by the generator's tokenizer. n_docs (`int`, *optional*): The number of docs retrieved per query. return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **context_input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). 
[What are attention masks?](../glossary#attention-mask) - **retrieved_doc_embeds** -- List of embeddings of the retrieved documents - **doc_ids** -- List of ids of the retrieved documents """ n_docs = n_docs if n_docs is not None else self.n_docs prefix = prefix if prefix is not None else self.config.generator.prefix retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs) input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True) context_input_ids, context_attention_mask = self.postprocess_docs( docs, input_strings, prefix, n_docs, return_tensors=return_tensors ) if self.return_tokenized_docs: retrieved_doc_text = [] retrieved_doc_title = [] for b_idx in range(len(docs)): for doc_idx in range(n_docs): retrieved_doc_text.append(docs[b_idx]["text"][doc_idx]) retrieved_doc_title.append(docs[b_idx]["title"][doc_idx]) tokenized_docs = self.ctx_encoder_tokenizer( retrieved_doc_title, retrieved_doc_text, truncation=True, padding="longest", return_tensors=return_tensors, ) return BatchEncoding( { "context_input_ids": context_input_ids, "context_attention_mask": context_attention_mask, "retrieved_doc_embeds": retrieved_doc_embeds, "doc_ids": doc_ids, "tokenized_doc_ids": tokenized_docs["input_ids"], "tokenized_doc_attention_mask": tokenized_docs["attention_mask"], }, tensor_type=return_tensors, ) else: return BatchEncoding( { "context_input_ids": context_input_ids, "context_attention_mask": context_attention_mask, "retrieved_doc_embeds": retrieved_doc_embeds, "doc_ids": doc_ids, }, tensor_type=return_tensors, )
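As a quick illustration of the retrieval path above, here is a minimal sketch (not part of the original file) that builds a toy indexed dataset and queries it through CustomHFIndex.get_top_docs. The column values, embedding dimension and random vectors are invented for the example; a real setup would use DPR context-encoder embeddings of dimension config.retrieval_vector_size (typically 768), as in examples/rag/use_own_knowledge_dataset.py referenced in the docstring.

import numpy as np
from datasets import Dataset

vector_size = 8  # toy dimension chosen for the sketch, not the real retrieval_vector_size

# the index expects exactly these three columns: title (str), text (str), embeddings (float arrays)
toy_dataset = Dataset.from_dict(
    {
        "title": ["Aaron", "Zeus"],
        "text": ["Aaron is a prophet in the Hebrew Bible.", "Zeus is the sky god of Greek mythology."],
        "embeddings": [np.random.randn(vector_size).astype("float32") for _ in range(2)],
    }
)
toy_dataset.add_faiss_index(column="embeddings")  # builds the "embeddings" faiss index in memory

# index_path=None means the faiss index is treated as already initialized (see CustomHFIndex.__init__)
index = CustomHFIndex(vector_size=vector_size, dataset=toy_dataset)

question_hidden_states = np.random.randn(1, vector_size).astype("float32")
doc_ids, doc_embeds = index.get_top_docs(question_hidden_states, n_docs=2)
print(doc_ids.shape, doc_embeds.shape)  # (1, 2) and (1, 2, vector_size)
print(index.get_doc_dicts(doc_ids)[0]["title"])  # titles of the retrieved passages

The same dataset object can also be handed to RagRetriever.from_pretrained(..., indexed_dataset=toy_dataset), which wraps it in a CustomHFIndex exactly as from_pretrained does above.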
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./examples/research_projects/seq2seq-distillation/finetune.sh
# the proper usage is documented in the README, you need to specify data_dir, output_dir and model_name_or_path
# run ./finetune.sh --help to see all the possible options
python finetune.py \
    --learning_rate=3e-5 \
    --fp16 \
    --gpus 1 \
    --do_train \
    --do_predict \
    --n_val 1000 \
    --val_check_interval 0.1 \
    "$@"
# the proper usage is documented in the README, you need to specify data_dir, output_dir and model_name_or_path
# run ./finetune.sh --help to see all the possible options
python finetune.py \
    --learning_rate=3e-5 \
    --fp16 \
    --gpus 1 \
    --do_train \
    --do_predict \
    --n_val 1000 \
    --val_check_interval 0.1 \
    "$@"
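For context, a typical invocation of this wrapper looks like the sketch below. The data directory, output directory and model identifier are placeholders rather than values taken from the repository, and any further finetune.py option can be appended since the script forwards "$@".

./finetune.sh \
    --data_dir /path/to/dataset \
    --output_dir /path/to/checkpoints \
    --model_name_or_path t5-small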
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/models/imagegpt/test_modeling_imagegpt.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import os import tempfile import unittest from transformers import ImageGPTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) if is_torch_available(): import torch from transformers import ( IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST, ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel, ) if is_vision_available(): from PIL import Image from transformers import ImageGPTFeatureExtractor class ImageGPTModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None def get_large_model_config(self): return ImageGPTConfig.from_pretrained("imagegpt") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): pixel_values = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if 
self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return ImageGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_inner=self.intermediate_size, activation_function=self.hidden_act, resid_pdrop=self.hidden_dropout_prob, attn_pdrop=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 513 config.max_position_embeddings = 1024 return config def prepare_config_and_inputs_for_decoder(self): ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, pixel_values, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_imagegpt_model(self, config, pixel_values, input_mask, head_mask, token_type_ids, *args): model = ImageGPTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values, token_type_ids=token_type_ids, head_mask=head_mask) result = model(pixel_values, token_type_ids=token_type_ids) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_lm_head_model(self, config, pixel_values, input_mask, head_mask, token_type_ids, *args): model = ImageGPTForCausalImageModeling(config) model.to(torch_device) model.eval() labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) result = model(pixel_values, token_type_ids=token_type_ids, labels=labels) self.parent.assertEqual(result.loss.shape, ()) # ImageGPTForCausalImageModeling doens't have tied input- and output embeddings self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size - 1)) def create_and_check_imagegpt_for_image_classification( self, config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = ImageGPTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class ImageGPTModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ( (ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel) if is_torch_available() else () ) all_generative_model_classes = (ImageGPTForCausalImageModeling,) if is_torch_available() else () test_missing_keys = False input_name = "pixel_values" # as ImageGPTForImageClassification isn't included in any auto mapping, we add labels here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "ImageGPTForImageClassification": inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict # we overwrite the _check_scores method of GenerationTesterMixin, as ImageGPTForCausalImageModeling doesn't have tied input- and output embeddings def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size - 1) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def setUp(self): self.model_tester = ImageGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=ImageGPTConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_imagegpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_model(*config_and_inputs) def test_imagegpt_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_imagegpt_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ImageGPTModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_ids"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone 
theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["pixel_values"].clamp_(max=model_vocab_size - 15 - 1) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["pixel_values"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) 
model(**self._prepare_for_class(inputs_dict, model_class)) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) pixel_values = inputs["pixel_values"] del inputs["pixel_values"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(pixel_values) with torch.no_grad(): model(**inputs)[0] def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: pixel_values = inputs["pixel_values"] traced_model = torch.jit.trace(model, pixel_values) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ImageGPTModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ImageGPTFeatureExtractor.from_pretrained("openai/imagegpt-small") if is_vision_available() else None @slow def test_inference_causal_lm_head(self): model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small").to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1024, 512)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[2.3445, 2.6889, 2.7313], [1.0530, 1.2416, 0.5699], [0.2205, 0.7749, 0.3953]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, 
:3], expected_slice, atol=1e-4))
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import os import tempfile import unittest from transformers import ImageGPTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) if is_torch_available(): import torch from transformers import ( IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST, ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel, ) if is_vision_available(): from PIL import Image from transformers import ImageGPTFeatureExtractor class ImageGPTModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None def get_large_model_config(self): return ImageGPTConfig.from_pretrained("imagegpt") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): pixel_values = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if 
self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return ImageGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_inner=self.intermediate_size, activation_function=self.hidden_act, resid_pdrop=self.hidden_dropout_prob, attn_pdrop=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 513 config.max_position_embeddings = 1024 return config def prepare_config_and_inputs_for_decoder(self): ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, pixel_values, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_imagegpt_model(self, config, pixel_values, input_mask, head_mask, token_type_ids, *args): model = ImageGPTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values, token_type_ids=token_type_ids, head_mask=head_mask) result = model(pixel_values, token_type_ids=token_type_ids) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_lm_head_model(self, config, pixel_values, input_mask, head_mask, token_type_ids, *args): model = ImageGPTForCausalImageModeling(config) model.to(torch_device) model.eval() labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) result = model(pixel_values, token_type_ids=token_type_ids, labels=labels) self.parent.assertEqual(result.loss.shape, ()) # ImageGPTForCausalImageModeling doens't have tied input- and output embeddings self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size - 1)) def create_and_check_imagegpt_for_image_classification( self, config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = ImageGPTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class ImageGPTModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ( (ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel) if is_torch_available() else () ) all_generative_model_classes = (ImageGPTForCausalImageModeling,) if is_torch_available() else () test_missing_keys = False input_name = "pixel_values" # as ImageGPTForImageClassification isn't included in any auto mapping, we add labels here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "ImageGPTForImageClassification": inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict # we overwrite the _check_scores method of GenerationTesterMixin, as ImageGPTForCausalImageModeling doesn't have tied input- and output embeddings def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size - 1) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def setUp(self): self.model_tester = ImageGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=ImageGPTConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_imagegpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_model(*config_and_inputs) def test_imagegpt_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_imagegpt_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ImageGPTModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_ids"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone 
theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["pixel_values"].clamp_(max=model_vocab_size - 15 - 1) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["pixel_values"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) 
model(**self._prepare_for_class(inputs_dict, model_class)) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) pixel_values = inputs["pixel_values"] del inputs["pixel_values"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(pixel_values) with torch.no_grad(): model(**inputs)[0] def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: pixel_values = inputs["pixel_values"] traced_model = torch.jit.trace(model, pixel_values) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ImageGPTModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ImageGPTFeatureExtractor.from_pretrained("openai/imagegpt-small") if is_vision_available() else None @slow def test_inference_causal_lm_head(self): model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small").to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1024, 512)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[2.3445, 2.6889, 2.7313], [1.0530, 1.2416, 0.5699], [0.2205, 0.7749, 0.3953]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, 
:3], expected_slice, atol=1e-4))
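A small sketch of the property these tests repeatedly check: ImageGPT consumes sequences of quantized pixel (colour-cluster) ids as its input, and ImageGPTForCausalImageModeling has untied output embeddings, so its logits have vocab_size - 1 classes. The tiny config below mirrors the defaults in ImageGPTModelTester and uses a randomly initialized model, so it is illustrative only.

import torch
from transformers import ImageGPTConfig, ImageGPTForCausalImageModeling

# toy configuration mirroring ImageGPTModelTester.get_config (not a pretrained checkpoint)
config = ImageGPTConfig(vocab_size=99, n_embd=32, n_layer=5, n_head=4, n_positions=512)
model = ImageGPTForCausalImageModeling(config).eval()

batch_size, seq_length = 2, 7
# quantized colour-cluster ids in [0, vocab_size - 2], as drawn by ids_tensor in the tester
pixel_values = torch.randint(0, config.vocab_size - 1, (batch_size, seq_length))

with torch.no_grad():
    logits = model(pixel_values).logits

# input and output embeddings are not tied, so the output dimension is vocab_size - 1
assert logits.shape == (batch_size, seq_length, config.vocab_size - 1)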
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./examples/research_projects/wav2vec2/run_pretrain.py
#!/usr/bin/env python3 import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn import librosa from transformers import ( HfArgumentParser, Trainer, TrainingArguments, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) freeze_feature_extractor: Optional[bool] = field( default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) verbose_logging: Optional[bool] = field( default=False, metadata={"help": "Whether to log verbose messages or not."}, ) max_gumbel_temperature: Optional[float] = field( default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."} ) min_gumbel_temperature: Optional[float] = field( default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."} ) gumbel_temperature_decay: Optional[float] = field( default=0.999995, metadata={"help": "Decay of gumbel temperature during training."} ) def configure_logger(model_args: ModelArguments, training_args: TrainingArguments): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logging_level = logging.WARNING if model_args.verbose_logging: logging_level = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank): logging_level = logging.INFO logger.setLevel(logging_level) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_split_name: Optional[str] = field( default="train", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) validation_split_name: Optional[str] = field( default="validation", metadata={ "help": ( "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" ) }, ) speech_file_column: Optional[str] = field( default="file", metadata={"help": "Column in the dataset that contains speech file path. 
Defaults to 'file'"}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) validation_split_percentage: Optional[int] = field( default=1, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_duration_in_seconds: Optional[float] = field( default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} ) @dataclass class DataCollatorForWav2Vec2Pretraining: """ Data collator that will dynamically pad the inputs received and prepare masked indices for self-supervised pretraining. Args: model (:class:`~transformers.Wav2Vec2ForPreTraining`): The Wav2Vec2 model used for pretraining. The data collator needs to have access to config and ``_get_feat_extract_output_lengths`` function for correct padding. feature_extractor (:class:`~transformers.Wav2Vec2FeatureExtractor`): The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
""" model: Wav2Vec2ForPreTraining feature_extractor: Wav2Vec2FeatureExtractor padding: Union[bool, str] = "longest" pad_to_multiple_of: Optional[int] = None max_length: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # reformat list to dict and set to pytorch format batch = self.feature_extractor.pad( features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1]) batch_size = batch["input_values"].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to( torch.long ) attention_mask = torch.zeros( (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device ) # these two operations makes sure that all values # before the output lengths indices are attended to attention_mask[ (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1) ] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() # sample randomly masked indices batch["mask_time_indices"] = _compute_mask_indices( (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, ) return batch class Wav2Vec2PreTrainer(Trainer): """ Subclassed :class:`~transformers.Trainer` for Wav2Vec2-like pretraining. Trainer can decay gumbel softmax temperature during training. """ def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs): super().__init__(*args, **kwargs) self.num_update_step = 0 self.max_gumbel_temp = max_gumbel_temp self.min_gumbel_temp = min_gumbel_temp self.gumbel_temp_decay = gumbel_temp_decay def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. """ model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": loss = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": loss = loss.sum() / (inputs["mask_time_indices"]).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. 
Choose one of ['mean', 'sum']") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() elif self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp) ) return loss.detach() def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() configure_logger(model_args, training_args) # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" datasets = DatasetDict() datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, ) else: # make sure only "validation" and "train" keys remain" datasets = DatasetDict() datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir, ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir, ) # only normalized-inputs-training is supported feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True ) def prepare_dataset(batch): # check that all files have the correct sampling rate batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate) return batch # load audio files into numpy arrays vectorized_datasets = datasets.map( prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names ) # filter audio files that are too long vectorized_datasets = vectorized_datasets.filter( lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) ) def normalize(batch): return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate) # normalize and transform to `BatchFeatures` vectorized_datasets = vectorized_datasets.map( normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names, ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob 
has to be 0.0 config = Wav2Vec2Config.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" " ``config.feat_extract_norm='layer'" ) model = Wav2Vec2ForPreTraining(config) data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor) trainer = Wav2Vec2PreTrainer( model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, ) trainer.train() if __name__ == "__main__": main()
#!/usr/bin/env python3 import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn import librosa from transformers import ( HfArgumentParser, Trainer, TrainingArguments, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) freeze_feature_extractor: Optional[bool] = field( default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) verbose_logging: Optional[bool] = field( default=False, metadata={"help": "Whether to log verbose messages or not."}, ) max_gumbel_temperature: Optional[float] = field( default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."} ) min_gumbel_temperature: Optional[float] = field( default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."} ) gumbel_temperature_decay: Optional[float] = field( default=0.999995, metadata={"help": "Decay of gumbel temperature during training."} ) def configure_logger(model_args: ModelArguments, training_args: TrainingArguments): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logging_level = logging.WARNING if model_args.verbose_logging: logging_level = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank): logging_level = logging.INFO logger.setLevel(logging_level) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_split_name: Optional[str] = field( default="train", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) validation_split_name: Optional[str] = field( default="validation", metadata={ "help": ( "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" ) }, ) speech_file_column: Optional[str] = field( default="file", metadata={"help": "Column in the dataset that contains speech file path. 
Defaults to 'file'"}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) validation_split_percentage: Optional[int] = field( default=1, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_duration_in_seconds: Optional[float] = field( default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} ) @dataclass class DataCollatorForWav2Vec2Pretraining: """ Data collator that will dynamically pad the inputs received and prepare masked indices for self-supervised pretraining. Args: model (:class:`~transformers.Wav2Vec2ForPreTraining`): The Wav2Vec2 model used for pretraining. The data collator needs to have access to config and ``_get_feat_extract_output_lengths`` function for correct padding. feature_extractor (:class:`~transformers.Wav2Vec2FeatureExtractor`): The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
""" model: Wav2Vec2ForPreTraining feature_extractor: Wav2Vec2FeatureExtractor padding: Union[bool, str] = "longest" pad_to_multiple_of: Optional[int] = None max_length: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # reformat list to dict and set to pytorch format batch = self.feature_extractor.pad( features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1]) batch_size = batch["input_values"].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to( torch.long ) attention_mask = torch.zeros( (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device ) # these two operations makes sure that all values # before the output lengths indices are attended to attention_mask[ (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1) ] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() # sample randomly masked indices batch["mask_time_indices"] = _compute_mask_indices( (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, ) return batch class Wav2Vec2PreTrainer(Trainer): """ Subclassed :class:`~transformers.Trainer` for Wav2Vec2-like pretraining. Trainer can decay gumbel softmax temperature during training. """ def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs): super().__init__(*args, **kwargs) self.num_update_step = 0 self.max_gumbel_temp = max_gumbel_temp self.min_gumbel_temp = min_gumbel_temp self.gumbel_temp_decay = gumbel_temp_decay def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. """ model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": loss = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": loss = loss.sum() / (inputs["mask_time_indices"]).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. 
Choose one of ['mean', 'sum']") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() elif self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp) ) return loss.detach() def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() configure_logger(model_args, training_args) # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" datasets = DatasetDict() datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, ) else: # make sure only "validation" and "train" keys remain" datasets = DatasetDict() datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir, ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir, ) # only normalized-inputs-training is supported feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True ) def prepare_dataset(batch): # check that all files have the correct sampling rate batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate) return batch # load audio files into numpy arrays vectorized_datasets = datasets.map( prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names ) # filter audio files that are too long vectorized_datasets = vectorized_datasets.filter( lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) ) def normalize(batch): return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate) # normalize and transform to `BatchFeatures` vectorized_datasets = vectorized_datasets.map( normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names, ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob 
has to be 0.0 config = Wav2Vec2Config.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" " ``config.feat_extract_norm='layer'" ) model = Wav2Vec2ForPreTraining(config) data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor) trainer = Wav2Vec2PreTrainer( model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, ) trainer.train() if __name__ == "__main__": main()
-1
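
Editor's note on the pretraining row above: `Wav2Vec2PreTrainer.training_step` anneals the Gumbel-softmax temperature multiplicatively every update and clamps it at a floor. A minimal sketch of that schedule, assuming the defaults from `ModelArguments` (`max_gumbel_temperature=2.0`, `min_gumbel_temperature=0.5`, `gumbel_temperature_decay=0.999995`); the helper name below is illustrative and not part of the script itself.

```python
# Sketch of the Gumbel temperature schedule applied inside Wav2Vec2PreTrainer.training_step.
# The script calls model.set_gumbel_temperature(...) inline; this helper only mirrors the math.

def gumbel_temperature(step: int, max_temp: float = 2.0, min_temp: float = 0.5, decay: float = 0.999995) -> float:
    """Multiplicative decay of the Gumbel-softmax temperature, clamped at a floor."""
    return max(max_temp * decay**step, min_temp)


if __name__ == "__main__":
    # Temperature shrinks slowly early on and eventually sticks at the floor.
    for step in (0, 100_000, 500_000, 1_000_000):
        print(step, round(gumbel_temperature(step), 4))
```
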
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/donut/__init__.py
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_donut_swin": ["DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "DonutSwinConfig"],
    "processing_donut": ["DonutProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_donut_swin"] = [
        "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DonutSwinModel",
        "DonutSwinPreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_donut"] = ["DonutFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_donut_swin import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, DonutSwinConfig
    from .processing_donut import DonutProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_donut_swin import (
            DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            DonutSwinModel,
            DonutSwinPreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_donut import DonutFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_donut_swin": ["DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "DonutSwinConfig"],
    "processing_donut": ["DonutProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_donut_swin"] = [
        "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DonutSwinModel",
        "DonutSwinPreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_donut"] = ["DonutFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_donut_swin import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, DonutSwinConfig
    from .processing_donut import DonutProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_donut_swin import (
            DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            DonutSwinModel,
            DonutSwinPreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_donut import DonutFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
-1
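
Editor's note on the Donut row above: the `__init__.py` defers heavy submodule imports through `_LazyModule`, keyed by `_import_structure`. As a rough illustration of the same idea only (not the actual `_LazyModule` implementation), a package can resolve public names lazily with a module-level `__getattr__` (PEP 562); the mapping below reuses two names from `_import_structure` purely as an example.

```python
# lazy_pkg/__init__.py -- illustrative sketch of lazy attribute resolution, not transformers' _LazyModule.
import importlib
from typing import Any

# Maps public attribute name -> submodule that defines it (mirrors the role of _import_structure).
_ATTR_TO_MODULE = {
    "DonutSwinConfig": ".configuration_donut_swin",
    "DonutProcessor": ".processing_donut",
}


def __getattr__(name: str) -> Any:
    # PEP 562: called only when `name` is not found through normal module lookup.
    if name in _ATTR_TO_MODULE:
        module = importlib.import_module(_ATTR_TO_MODULE[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```
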
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/mobilevit/modeling_mobilevit.py
# coding=utf-8 # Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE """ PyTorch MobileViT model.""" import math from typing import Dict, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_mobilevit import MobileViTConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "MobileViTConfig" _FEAT_EXTRACTOR_FOR_DOC = "MobileViTFeatureExtractor" # Base docstring _CHECKPOINT_FOR_DOC = "apple/mobilevit-small" _EXPECTED_OUTPUT_SHAPE = [1, 640, 8, 8] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "apple/mobilevit-small" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "apple/mobilevit-small", "apple/mobilevit-x-small", "apple/mobilevit-xx-small", "apple/deeplabv3-mobilevit-small", "apple/deeplabv3-mobilevit-x-small", "apple/deeplabv3-mobilevit-xx-small", # See all MobileViT models at https://huggingface.co/models?filter=mobilevit ] def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int: """ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the original TensorFlow repo. It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py """ if min_value is None: min_value = divisor new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. 
if new_value < 0.9 * value: new_value += divisor return int(new_value) class MobileViTConvLayer(nn.Module): def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, groups: int = 1, bias: bool = False, dilation: int = 1, use_normalization: bool = True, use_activation: Union[bool, str] = True, ) -> None: super().__init__() padding = int((kernel_size - 1) / 2) * dilation if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") self.convolution = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode="zeros", ) if use_normalization: self.normalization = nn.BatchNorm2d( num_features=out_channels, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, ) else: self.normalization = None if use_activation: if isinstance(use_activation, str): self.activation = ACT2FN[use_activation] elif isinstance(config.hidden_act, str): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act else: self.activation = None def forward(self, features: torch.Tensor) -> torch.Tensor: features = self.convolution(features) if self.normalization is not None: features = self.normalization(features) if self.activation is not None: features = self.activation(features) return features class MobileViTInvertedResidual(nn.Module): """ Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381 """ def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int = 1 ) -> None: super().__init__() expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8) if stride not in [1, 2]: raise ValueError(f"Invalid stride {stride}.") self.use_residual = (stride == 1) and (in_channels == out_channels) self.expand_1x1 = MobileViTConvLayer( config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1 ) self.conv_3x3 = MobileViTConvLayer( config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=stride, groups=expanded_channels, dilation=dilation, ) self.reduce_1x1 = MobileViTConvLayer( config, in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False, ) def forward(self, features: torch.Tensor) -> torch.Tensor: residual = features features = self.expand_1x1(features) features = self.conv_3x3(features) features = self.reduce_1x1(features) return residual + features if self.use_residual else features class MobileViTMobileNetLayer(nn.Module): def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1 ) -> None: super().__init__() self.layer = nn.ModuleList() for i in range(num_stages): layer = MobileViTInvertedResidual( config, in_channels=in_channels, out_channels=out_channels, stride=stride if i == 0 else 1, ) self.layer.append(layer) in_channels = out_channels def forward(self, features: torch.Tensor) -> torch.Tensor: for layer_module in self.layer: features = layer_module(features) return features class MobileViTSelfAttention(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int) -> None: super().__init__() if hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size 
{hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class MobileViTSelfOutput(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int) -> None: super().__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class MobileViTAttention(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int) -> None: super().__init__() self.attention = MobileViTSelfAttention(config, hidden_size) self.output = MobileViTSelfOutput(config, hidden_size) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: self_outputs = self.attention(hidden_states) attention_output = self.output(self_outputs) 
return attention_output class MobileViTIntermediate(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None: super().__init__() self.dense = nn.Linear(hidden_size, intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class MobileViTOutput(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None: super().__init__() self.dense = nn.Linear(intermediate_size, hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class MobileViTTransformerLayer(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None: super().__init__() self.attention = MobileViTAttention(config, hidden_size) self.intermediate = MobileViTIntermediate(config, hidden_size, intermediate_size) self.output = MobileViTOutput(config, hidden_size, intermediate_size) self.layernorm_before = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: attention_output = self.attention(self.layernorm_before(hidden_states)) hidden_states = attention_output + hidden_states layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.output(layer_output, hidden_states) return layer_output class MobileViTTransformer(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int) -> None: super().__init__() self.layer = nn.ModuleList() for _ in range(num_stages): transformer_layer = MobileViTTransformerLayer( config, hidden_size=hidden_size, intermediate_size=int(hidden_size * config.mlp_ratio), ) self.layer.append(transformer_layer) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: for layer_module in self.layer: hidden_states = layer_module(hidden_states) return hidden_states class MobileViTLayer(nn.Module): """ MobileViT block: https://arxiv.org/abs/2110.02178 """ def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, hidden_size: int, num_stages: int, dilation: int = 1, ) -> None: super().__init__() self.patch_width = config.patch_size self.patch_height = config.patch_size if stride == 2: self.downsampling_layer = MobileViTInvertedResidual( config, in_channels=in_channels, out_channels=out_channels, stride=stride if dilation == 1 else 1, dilation=dilation // 2 if dilation > 1 else 1, ) in_channels = out_channels else: self.downsampling_layer = None self.conv_kxk = MobileViTConvLayer( config, in_channels=in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size, ) self.conv_1x1 = MobileViTConvLayer( config, in_channels=in_channels, out_channels=hidden_size, kernel_size=1, use_normalization=False, use_activation=False, ) self.transformer = MobileViTTransformer( config, hidden_size=hidden_size, num_stages=num_stages, ) self.layernorm = nn.LayerNorm(hidden_size, 
eps=config.layer_norm_eps) self.conv_projection = MobileViTConvLayer( config, in_channels=hidden_size, out_channels=in_channels, kernel_size=1 ) self.fusion = MobileViTConvLayer( config, in_channels=2 * in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size ) def unfolding(self, features: torch.Tensor) -> Tuple[torch.Tensor, Dict]: patch_width, patch_height = self.patch_width, self.patch_height patch_area = int(patch_width * patch_height) batch_size, channels, orig_height, orig_width = features.shape new_height = int(math.ceil(orig_height / patch_height) * patch_height) new_width = int(math.ceil(orig_width / patch_width) * patch_width) interpolate = False if new_width != orig_width or new_height != orig_height: # Note: Padding can be done, but then it needs to be handled in attention function. features = nn.functional.interpolate( features, size=(new_height, new_width), mode="bilinear", align_corners=False ) interpolate = True # number of patches along width and height num_patch_width = new_width // patch_width num_patch_height = new_height // patch_height num_patches = num_patch_height * num_patch_width # convert from shape (batch_size, channels, orig_height, orig_width) # to the shape (batch_size * patch_area, num_patches, channels) patches = features.reshape( batch_size * channels * num_patch_height, patch_height, num_patch_width, patch_width ) patches = patches.transpose(1, 2) patches = patches.reshape(batch_size, channels, num_patches, patch_area) patches = patches.transpose(1, 3) patches = patches.reshape(batch_size * patch_area, num_patches, -1) info_dict = { "orig_size": (orig_height, orig_width), "batch_size": batch_size, "channels": channels, "interpolate": interpolate, "num_patches": num_patches, "num_patches_width": num_patch_width, "num_patches_height": num_patch_height, } return patches, info_dict def folding(self, patches: torch.Tensor, info_dict: Dict) -> torch.Tensor: patch_width, patch_height = self.patch_width, self.patch_height patch_area = int(patch_width * patch_height) batch_size = info_dict["batch_size"] channels = info_dict["channels"] num_patches = info_dict["num_patches"] num_patch_height = info_dict["num_patches_height"] num_patch_width = info_dict["num_patches_width"] # convert from shape (batch_size * patch_area, num_patches, channels) # back to shape (batch_size, channels, orig_height, orig_width) features = patches.contiguous().view(batch_size, patch_area, num_patches, -1) features = features.transpose(1, 3) features = features.reshape( batch_size * channels * num_patch_height, num_patch_width, patch_height, patch_width ) features = features.transpose(1, 2) features = features.reshape( batch_size, channels, num_patch_height * patch_height, num_patch_width * patch_width ) if info_dict["interpolate"]: features = nn.functional.interpolate( features, size=info_dict["orig_size"], mode="bilinear", align_corners=False ) return features def forward(self, features: torch.Tensor) -> torch.Tensor: # reduce spatial dimensions if needed if self.downsampling_layer: features = self.downsampling_layer(features) residual = features # local representation features = self.conv_kxk(features) features = self.conv_1x1(features) # convert feature map to patches patches, info_dict = self.unfolding(features) # learn global representations patches = self.transformer(patches) patches = self.layernorm(patches) # convert patches back to feature maps features = self.folding(patches, info_dict) features = self.conv_projection(features) features = 
self.fusion(torch.cat((residual, features), dim=1)) return features class MobileViTEncoder(nn.Module): def __init__(self, config: MobileViTConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList() self.gradient_checkpointing = False # segmentation architectures like DeepLab and PSPNet modify the strides # of the classification backbones dilate_layer_4 = dilate_layer_5 = False if config.output_stride == 8: dilate_layer_4 = True dilate_layer_5 = True elif config.output_stride == 16: dilate_layer_5 = True dilation = 1 layer_1 = MobileViTMobileNetLayer( config, in_channels=config.neck_hidden_sizes[0], out_channels=config.neck_hidden_sizes[1], stride=1, num_stages=1, ) self.layer.append(layer_1) layer_2 = MobileViTMobileNetLayer( config, in_channels=config.neck_hidden_sizes[1], out_channels=config.neck_hidden_sizes[2], stride=2, num_stages=3, ) self.layer.append(layer_2) layer_3 = MobileViTLayer( config, in_channels=config.neck_hidden_sizes[2], out_channels=config.neck_hidden_sizes[3], stride=2, hidden_size=config.hidden_sizes[0], num_stages=2, ) self.layer.append(layer_3) if dilate_layer_4: dilation *= 2 layer_4 = MobileViTLayer( config, in_channels=config.neck_hidden_sizes[3], out_channels=config.neck_hidden_sizes[4], stride=2, hidden_size=config.hidden_sizes[1], num_stages=4, dilation=dilation, ) self.layer.append(layer_4) if dilate_layer_5: dilation *= 2 layer_5 = MobileViTLayer( config, in_channels=config.neck_hidden_sizes[4], out_channels=config.neck_hidden_sizes[5], stride=2, hidden_size=config.hidden_sizes[2], num_stages=3, dilation=dilation, ) self.layer.append(layer_5) def forward( self, hidden_states: torch.Tensor, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutputWithNoAttention]: all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, ) else: hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states) class MobileViTPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = MobileViTConfig base_model_prefix = "mobilevit" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, MobileViTEncoder): module.gradient_checkpointing = value MOBILEVIT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileViTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MOBILEVIT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`MobileViTFeatureExtractor`]. See [`MobileViTFeatureExtractor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare MobileViT model outputting raw hidden-states without any specific head on top.", MOBILEVIT_START_DOCSTRING, ) class MobileViTModel(MobileViTPreTrainedModel): def __init__(self, config: MobileViTConfig, expand_output: bool = True): super().__init__(config) self.config = config self.expand_output = expand_output self.conv_stem = MobileViTConvLayer( config, in_channels=config.num_channels, out_channels=config.neck_hidden_sizes[0], kernel_size=3, stride=2, ) self.encoder = MobileViTEncoder(config) if self.expand_output: self.conv_1x1_exp = MobileViTConvLayer( config, in_channels=config.neck_hidden_sizes[5], out_channels=config.neck_hidden_sizes[6], kernel_size=1, ) # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): """Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer_index, heads in heads_to_prune.items(): mobilevit_layer = self.encoder.layer[layer_index] if isinstance(mobilevit_layer, MobileViTLayer): for transformer_layer in mobilevit_layer.transformer.layer: transformer_layer.attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.conv_stem(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.expand_output: last_hidden_state = self.conv_1x1_exp(encoder_outputs[0]) # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels) pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False) else: last_hidden_state = encoder_outputs[0] pooled_output = None if not return_dict: output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,) return output + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, ) @add_start_docstrings( """ MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """, MOBILEVIT_START_DOCSTRING, ) class MobileViTForImageClassification(MobileViTPreTrainedModel): def __init__(self, config: MobileViTConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilevit = MobileViTModel(config) # Classifier head self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True) self.classifier = ( nn.Linear(config.neck_hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilevit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(self.dropout(pooled_output)) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) class MobileViTASPPPooling(nn.Module): def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int) -> None: super().__init__() self.global_pool = nn.AdaptiveAvgPool2d(output_size=1) self.conv_1x1 = MobileViTConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, use_normalization=True, use_activation="relu", ) def forward(self, features: torch.Tensor) -> torch.Tensor: spatial_size = features.shape[-2:] features = self.global_pool(features) features = self.conv_1x1(features) features = nn.functional.interpolate(features, size=spatial_size, mode="bilinear", align_corners=False) return features class MobileViTASPP(nn.Module): """ ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587 """ def __init__(self, config: MobileViTConfig) -> None: super().__init__() in_channels = config.neck_hidden_sizes[-2] out_channels = config.aspp_out_channels if len(config.atrous_rates) != 3: raise ValueError("Expected 3 values for atrous_rates") self.convs = nn.ModuleList() in_projection = MobileViTConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, use_activation="relu", ) self.convs.append(in_projection) self.convs.extend( [ MobileViTConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=3, dilation=rate, use_activation="relu", ) for rate in config.atrous_rates ] ) pool_layer = MobileViTASPPPooling(config, in_channels, out_channels) self.convs.append(pool_layer) self.project = MobileViTConvLayer( config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu" ) self.dropout = nn.Dropout(p=config.aspp_dropout_prob) def forward(self, features: torch.Tensor) -> torch.Tensor: pyramid = [] for conv in self.convs: pyramid.append(conv(features)) pyramid = torch.cat(pyramid, dim=1) pooled_features = self.project(pyramid) pooled_features = self.dropout(pooled_features) return pooled_features class MobileViTDeepLabV3(nn.Module): """ DeepLabv3 architecture: https://arxiv.org/abs/1706.05587 """ def __init__(self, config: MobileViTConfig) -> None: super().__init__() self.aspp = 
MobileViTASPP(config) self.dropout = nn.Dropout2d(config.classifier_dropout_prob) self.classifier = MobileViTConvLayer( config, in_channels=config.aspp_out_channels, out_channels=config.num_labels, kernel_size=1, use_normalization=False, use_activation=False, bias=True, ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: features = self.aspp(hidden_states[-1]) features = self.dropout(features) features = self.classifier(features) return features @add_start_docstrings( """ MobileViT model with a semantic segmentation head on top, e.g. for Pascal VOC. """, MOBILEVIT_START_DOCSTRING, ) class MobileViTForSemanticSegmentation(MobileViTPreTrainedModel): def __init__(self, config: MobileViTConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilevit = MobileViTModel(config, expand_output=False) self.segmentation_head = MobileViTDeepLabV3(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SemanticSegmenterOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> from transformers import MobileViTFeatureExtractor, MobileViTForSemanticSegmentation >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/deeplabv3-mobilevit-small") >>> model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small") >>> inputs = feature_extractor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> # logits are of shape (batch_size, num_labels, height, width) >>> logits = outputs.logits ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilevit( pixel_values, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] logits = self.segmentation_head(encoder_hidden_states) loss = None if labels is not None: if self.config.num_labels == 1: raise ValueError("The number of labels should be greater than one") else: # upsample logits to the images' original size upsampled_logits = nn.functional.interpolate( logits, size=labels.shape[-2:], mode="bilinear", align_corners=False ) loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) loss = loss_fct(upsampled_logits, labels) if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, )
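The segmentation forward pass above upsamples the logits to the label resolution with bilinear interpolation before computing the cross-entropy loss. As an illustrative sketch only (not part of the file above; the helper name `logits_to_segmentation_map` and the dummy shapes are assumptions for the example), the same interpolate-then-argmax pattern can turn the returned logits into a per-pixel class map:

```python
import torch


def logits_to_segmentation_map(logits: torch.Tensor, target_size=None) -> torch.Tensor:
    # logits has shape (batch_size, num_labels, height, width).
    if target_size is not None:
        # Mirror the loss computation above: upsample to the original image size.
        logits = torch.nn.functional.interpolate(
            logits, size=target_size, mode="bilinear", align_corners=False
        )
    # Argmax over the class dimension yields one integer label per pixel.
    return logits.argmax(dim=1)


# Random logits for 21 Pascal VOC classes on an 8x8 output map.
dummy_logits = torch.randn(1, 21, 8, 8)
segmentation_map = logits_to_segmentation_map(dummy_logits, target_size=(512, 512))
print(segmentation_map.shape)  # torch.Size([1, 512, 512])
```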
# coding=utf-8 # Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE """ PyTorch MobileViT model.""" import math from typing import Dict, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_mobilevit import MobileViTConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "MobileViTConfig" _FEAT_EXTRACTOR_FOR_DOC = "MobileViTFeatureExtractor" # Base docstring _CHECKPOINT_FOR_DOC = "apple/mobilevit-small" _EXPECTED_OUTPUT_SHAPE = [1, 640, 8, 8] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "apple/mobilevit-small" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "apple/mobilevit-small", "apple/mobilevit-x-small", "apple/mobilevit-xx-small", "apple/deeplabv3-mobilevit-small", "apple/deeplabv3-mobilevit-x-small", "apple/deeplabv3-mobilevit-xx-small", # See all MobileViT models at https://huggingface.co/models?filter=mobilevit ] def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int: """ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the original TensorFlow repo. It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py """ if min_value is None: min_value = divisor new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. 
if new_value < 0.9 * value: new_value += divisor return int(new_value) class MobileViTConvLayer(nn.Module): def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, groups: int = 1, bias: bool = False, dilation: int = 1, use_normalization: bool = True, use_activation: Union[bool, str] = True, ) -> None: super().__init__() padding = int((kernel_size - 1) / 2) * dilation if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") self.convolution = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode="zeros", ) if use_normalization: self.normalization = nn.BatchNorm2d( num_features=out_channels, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, ) else: self.normalization = None if use_activation: if isinstance(use_activation, str): self.activation = ACT2FN[use_activation] elif isinstance(config.hidden_act, str): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act else: self.activation = None def forward(self, features: torch.Tensor) -> torch.Tensor: features = self.convolution(features) if self.normalization is not None: features = self.normalization(features) if self.activation is not None: features = self.activation(features) return features class MobileViTInvertedResidual(nn.Module): """ Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381 """ def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int = 1 ) -> None: super().__init__() expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8) if stride not in [1, 2]: raise ValueError(f"Invalid stride {stride}.") self.use_residual = (stride == 1) and (in_channels == out_channels) self.expand_1x1 = MobileViTConvLayer( config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1 ) self.conv_3x3 = MobileViTConvLayer( config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=stride, groups=expanded_channels, dilation=dilation, ) self.reduce_1x1 = MobileViTConvLayer( config, in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False, ) def forward(self, features: torch.Tensor) -> torch.Tensor: residual = features features = self.expand_1x1(features) features = self.conv_3x3(features) features = self.reduce_1x1(features) return residual + features if self.use_residual else features class MobileViTMobileNetLayer(nn.Module): def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1 ) -> None: super().__init__() self.layer = nn.ModuleList() for i in range(num_stages): layer = MobileViTInvertedResidual( config, in_channels=in_channels, out_channels=out_channels, stride=stride if i == 0 else 1, ) self.layer.append(layer) in_channels = out_channels def forward(self, features: torch.Tensor) -> torch.Tensor: for layer_module in self.layer: features = layer_module(features) return features class MobileViTSelfAttention(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int) -> None: super().__init__() if hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size 
{hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class MobileViTSelfOutput(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int) -> None: super().__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class MobileViTAttention(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int) -> None: super().__init__() self.attention = MobileViTSelfAttention(config, hidden_size) self.output = MobileViTSelfOutput(config, hidden_size) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: self_outputs = self.attention(hidden_states) attention_output = self.output(self_outputs) 
return attention_output class MobileViTIntermediate(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None: super().__init__() self.dense = nn.Linear(hidden_size, intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class MobileViTOutput(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None: super().__init__() self.dense = nn.Linear(intermediate_size, hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class MobileViTTransformerLayer(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None: super().__init__() self.attention = MobileViTAttention(config, hidden_size) self.intermediate = MobileViTIntermediate(config, hidden_size, intermediate_size) self.output = MobileViTOutput(config, hidden_size, intermediate_size) self.layernorm_before = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: attention_output = self.attention(self.layernorm_before(hidden_states)) hidden_states = attention_output + hidden_states layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.output(layer_output, hidden_states) return layer_output class MobileViTTransformer(nn.Module): def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int) -> None: super().__init__() self.layer = nn.ModuleList() for _ in range(num_stages): transformer_layer = MobileViTTransformerLayer( config, hidden_size=hidden_size, intermediate_size=int(hidden_size * config.mlp_ratio), ) self.layer.append(transformer_layer) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: for layer_module in self.layer: hidden_states = layer_module(hidden_states) return hidden_states class MobileViTLayer(nn.Module): """ MobileViT block: https://arxiv.org/abs/2110.02178 """ def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, hidden_size: int, num_stages: int, dilation: int = 1, ) -> None: super().__init__() self.patch_width = config.patch_size self.patch_height = config.patch_size if stride == 2: self.downsampling_layer = MobileViTInvertedResidual( config, in_channels=in_channels, out_channels=out_channels, stride=stride if dilation == 1 else 1, dilation=dilation // 2 if dilation > 1 else 1, ) in_channels = out_channels else: self.downsampling_layer = None self.conv_kxk = MobileViTConvLayer( config, in_channels=in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size, ) self.conv_1x1 = MobileViTConvLayer( config, in_channels=in_channels, out_channels=hidden_size, kernel_size=1, use_normalization=False, use_activation=False, ) self.transformer = MobileViTTransformer( config, hidden_size=hidden_size, num_stages=num_stages, ) self.layernorm = nn.LayerNorm(hidden_size, 
eps=config.layer_norm_eps) self.conv_projection = MobileViTConvLayer( config, in_channels=hidden_size, out_channels=in_channels, kernel_size=1 ) self.fusion = MobileViTConvLayer( config, in_channels=2 * in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size ) def unfolding(self, features: torch.Tensor) -> Tuple[torch.Tensor, Dict]: patch_width, patch_height = self.patch_width, self.patch_height patch_area = int(patch_width * patch_height) batch_size, channels, orig_height, orig_width = features.shape new_height = int(math.ceil(orig_height / patch_height) * patch_height) new_width = int(math.ceil(orig_width / patch_width) * patch_width) interpolate = False if new_width != orig_width or new_height != orig_height: # Note: Padding can be done, but then it needs to be handled in attention function. features = nn.functional.interpolate( features, size=(new_height, new_width), mode="bilinear", align_corners=False ) interpolate = True # number of patches along width and height num_patch_width = new_width // patch_width num_patch_height = new_height // patch_height num_patches = num_patch_height * num_patch_width # convert from shape (batch_size, channels, orig_height, orig_width) # to the shape (batch_size * patch_area, num_patches, channels) patches = features.reshape( batch_size * channels * num_patch_height, patch_height, num_patch_width, patch_width ) patches = patches.transpose(1, 2) patches = patches.reshape(batch_size, channels, num_patches, patch_area) patches = patches.transpose(1, 3) patches = patches.reshape(batch_size * patch_area, num_patches, -1) info_dict = { "orig_size": (orig_height, orig_width), "batch_size": batch_size, "channels": channels, "interpolate": interpolate, "num_patches": num_patches, "num_patches_width": num_patch_width, "num_patches_height": num_patch_height, } return patches, info_dict def folding(self, patches: torch.Tensor, info_dict: Dict) -> torch.Tensor: patch_width, patch_height = self.patch_width, self.patch_height patch_area = int(patch_width * patch_height) batch_size = info_dict["batch_size"] channels = info_dict["channels"] num_patches = info_dict["num_patches"] num_patch_height = info_dict["num_patches_height"] num_patch_width = info_dict["num_patches_width"] # convert from shape (batch_size * patch_area, num_patches, channels) # back to shape (batch_size, channels, orig_height, orig_width) features = patches.contiguous().view(batch_size, patch_area, num_patches, -1) features = features.transpose(1, 3) features = features.reshape( batch_size * channels * num_patch_height, num_patch_width, patch_height, patch_width ) features = features.transpose(1, 2) features = features.reshape( batch_size, channels, num_patch_height * patch_height, num_patch_width * patch_width ) if info_dict["interpolate"]: features = nn.functional.interpolate( features, size=info_dict["orig_size"], mode="bilinear", align_corners=False ) return features def forward(self, features: torch.Tensor) -> torch.Tensor: # reduce spatial dimensions if needed if self.downsampling_layer: features = self.downsampling_layer(features) residual = features # local representation features = self.conv_kxk(features) features = self.conv_1x1(features) # convert feature map to patches patches, info_dict = self.unfolding(features) # learn global representations patches = self.transformer(patches) patches = self.layernorm(patches) # convert patches back to feature maps features = self.folding(patches, info_dict) features = self.conv_projection(features) features = 
self.fusion(torch.cat((residual, features), dim=1)) return features class MobileViTEncoder(nn.Module): def __init__(self, config: MobileViTConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList() self.gradient_checkpointing = False # segmentation architectures like DeepLab and PSPNet modify the strides # of the classification backbones dilate_layer_4 = dilate_layer_5 = False if config.output_stride == 8: dilate_layer_4 = True dilate_layer_5 = True elif config.output_stride == 16: dilate_layer_5 = True dilation = 1 layer_1 = MobileViTMobileNetLayer( config, in_channels=config.neck_hidden_sizes[0], out_channels=config.neck_hidden_sizes[1], stride=1, num_stages=1, ) self.layer.append(layer_1) layer_2 = MobileViTMobileNetLayer( config, in_channels=config.neck_hidden_sizes[1], out_channels=config.neck_hidden_sizes[2], stride=2, num_stages=3, ) self.layer.append(layer_2) layer_3 = MobileViTLayer( config, in_channels=config.neck_hidden_sizes[2], out_channels=config.neck_hidden_sizes[3], stride=2, hidden_size=config.hidden_sizes[0], num_stages=2, ) self.layer.append(layer_3) if dilate_layer_4: dilation *= 2 layer_4 = MobileViTLayer( config, in_channels=config.neck_hidden_sizes[3], out_channels=config.neck_hidden_sizes[4], stride=2, hidden_size=config.hidden_sizes[1], num_stages=4, dilation=dilation, ) self.layer.append(layer_4) if dilate_layer_5: dilation *= 2 layer_5 = MobileViTLayer( config, in_channels=config.neck_hidden_sizes[4], out_channels=config.neck_hidden_sizes[5], stride=2, hidden_size=config.hidden_sizes[2], num_stages=3, dilation=dilation, ) self.layer.append(layer_5) def forward( self, hidden_states: torch.Tensor, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutputWithNoAttention]: all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, ) else: hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states) class MobileViTPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = MobileViTConfig base_model_prefix = "mobilevit" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, MobileViTEncoder): module.gradient_checkpointing = value MOBILEVIT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileViTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MOBILEVIT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`MobileViTFeatureExtractor`]. See [`MobileViTFeatureExtractor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare MobileViT model outputting raw hidden-states without any specific head on top.", MOBILEVIT_START_DOCSTRING, ) class MobileViTModel(MobileViTPreTrainedModel): def __init__(self, config: MobileViTConfig, expand_output: bool = True): super().__init__(config) self.config = config self.expand_output = expand_output self.conv_stem = MobileViTConvLayer( config, in_channels=config.num_channels, out_channels=config.neck_hidden_sizes[0], kernel_size=3, stride=2, ) self.encoder = MobileViTEncoder(config) if self.expand_output: self.conv_1x1_exp = MobileViTConvLayer( config, in_channels=config.neck_hidden_sizes[5], out_channels=config.neck_hidden_sizes[6], kernel_size=1, ) # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): """Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer_index, heads in heads_to_prune.items(): mobilevit_layer = self.encoder.layer[layer_index] if isinstance(mobilevit_layer, MobileViTLayer): for transformer_layer in mobilevit_layer.transformer.layer: transformer_layer.attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.conv_stem(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.expand_output: last_hidden_state = self.conv_1x1_exp(encoder_outputs[0]) # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels) pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False) else: last_hidden_state = encoder_outputs[0] pooled_output = None if not return_dict: output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,) return output + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, ) @add_start_docstrings( """ MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """, MOBILEVIT_START_DOCSTRING, ) class MobileViTForImageClassification(MobileViTPreTrainedModel): def __init__(self, config: MobileViTConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilevit = MobileViTModel(config) # Classifier head self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True) self.classifier = ( nn.Linear(config.neck_hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilevit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(self.dropout(pooled_output)) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) class MobileViTASPPPooling(nn.Module): def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int) -> None: super().__init__() self.global_pool = nn.AdaptiveAvgPool2d(output_size=1) self.conv_1x1 = MobileViTConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, use_normalization=True, use_activation="relu", ) def forward(self, features: torch.Tensor) -> torch.Tensor: spatial_size = features.shape[-2:] features = self.global_pool(features) features = self.conv_1x1(features) features = nn.functional.interpolate(features, size=spatial_size, mode="bilinear", align_corners=False) return features class MobileViTASPP(nn.Module): """ ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587 """ def __init__(self, config: MobileViTConfig) -> None: super().__init__() in_channels = config.neck_hidden_sizes[-2] out_channels = config.aspp_out_channels if len(config.atrous_rates) != 3: raise ValueError("Expected 3 values for atrous_rates") self.convs = nn.ModuleList() in_projection = MobileViTConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, use_activation="relu", ) self.convs.append(in_projection) self.convs.extend( [ MobileViTConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=3, dilation=rate, use_activation="relu", ) for rate in config.atrous_rates ] ) pool_layer = MobileViTASPPPooling(config, in_channels, out_channels) self.convs.append(pool_layer) self.project = MobileViTConvLayer( config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu" ) self.dropout = nn.Dropout(p=config.aspp_dropout_prob) def forward(self, features: torch.Tensor) -> torch.Tensor: pyramid = [] for conv in self.convs: pyramid.append(conv(features)) pyramid = torch.cat(pyramid, dim=1) pooled_features = self.project(pyramid) pooled_features = self.dropout(pooled_features) return pooled_features class MobileViTDeepLabV3(nn.Module): """ DeepLabv3 architecture: https://arxiv.org/abs/1706.05587 """ def __init__(self, config: MobileViTConfig) -> None: super().__init__() self.aspp = 
MobileViTASPP(config) self.dropout = nn.Dropout2d(config.classifier_dropout_prob) self.classifier = MobileViTConvLayer( config, in_channels=config.aspp_out_channels, out_channels=config.num_labels, kernel_size=1, use_normalization=False, use_activation=False, bias=True, ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: features = self.aspp(hidden_states[-1]) features = self.dropout(features) features = self.classifier(features) return features @add_start_docstrings( """ MobileViT model with a semantic segmentation head on top, e.g. for Pascal VOC. """, MOBILEVIT_START_DOCSTRING, ) class MobileViTForSemanticSegmentation(MobileViTPreTrainedModel): def __init__(self, config: MobileViTConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilevit = MobileViTModel(config, expand_output=False) self.segmentation_head = MobileViTDeepLabV3(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SemanticSegmenterOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> from transformers import MobileViTFeatureExtractor, MobileViTForSemanticSegmentation >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/deeplabv3-mobilevit-small") >>> model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small") >>> inputs = feature_extractor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> # logits are of shape (batch_size, num_labels, height, width) >>> logits = outputs.logits ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilevit( pixel_values, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] logits = self.segmentation_head(encoder_hidden_states) loss = None if labels is not None: if self.config.num_labels == 1: raise ValueError("The number of labels should be greater than one") else: # upsample logits to the images' original size upsampled_logits = nn.functional.interpolate( logits, size=labels.shape[-2:], mode="bilinear", align_corners=False ) loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) loss = loss_fct(upsampled_logits, labels) if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, )
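For reference, the image-classification head defined in this file can be exercised the same way as the segmentation example in its docstring. The sketch below is illustrative only (it assumes network access to the `apple/mobilevit-small` checkpoint named in the docstring constants) and is not part of the original module:

```python
import requests
import torch
from PIL import Image

from transformers import MobileViTFeatureExtractor, MobileViTForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/mobilevit-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-small")

inputs = feature_extractor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Top-1 ImageNet class; the docstring constant _IMAGE_CLASS_EXPECTED_OUTPUT is "tabby, tabby cat".
print(model.config.id2label[logits.argmax(-1).item()])
```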
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/mctct/configuration_mctct.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """M-CTC-T model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json", # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class MCTCTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MCTCTModel`]. It is used to instantiate an M-CTC-T model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the M-CTC-T [speechbrain/m-ctc-t-large](https://huggingface.co/speechbrain/m-ctc-t-large) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 8065): Vocabulary size of the M-CTC-T model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MCTCTModel`]. hidden_size (`int`, *optional*, defaults to 1536): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 36): Number of hidden layers in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 6144): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. attention_head_dim (`int`, *optional*, defaults to 384): Dimensions of each attention head for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 920): The maximum sequence length that this model might ever be used with (after log-mel spectrogram extraction). layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. layerdrop (`float`, *optional*, defaults to 0.3): The probability of dropping an encoder layer during training. The default 0.3 value is used in the original implementation. hidden_act (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. 
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. pad_token_id (`int`, *optional*, defaults to 1): The tokenizer index of the pad token. bos_token_id (`int`, *optional*, defaults to 0): The tokenizer index of the bos token. eos_token_id (`int`, *optional*, defaults to 2): The tokenizer index of the eos token. conv_glu_dim (`int`, *optional*, defaults to 1): The dimension of the output of the `Conv1dSubsampler` layer in which GLU is applied on. Though the original Flashlight code uses the value of 2, here it's adapted to 1 due to transposition differences. conv_dropout (`int`, *optional*, defaults to 0.3): The probability of randomly dropping the `Conv1dSubsampler` layer during training. num_conv_layers (`int`, *optional*, defaults to 1): Number of convolution layers before applying transformer encoder layers. conv_kernel (`List[int]`, *optional*, defaults to `[7]`): The kernel size of the 1D convolution applied before transformer layers. `len(conv_kernel)` must be equal to `num_conv_layers`. conv_stride (`List[int]`, *optional*, defaults to `[3]`): The stride length of the 1D convolution applied before transformer layers. `len(conv_stride)` must be equal to `num_conv_layers`. input_feat_per_channel (`int`, *optional*, defaults to 80): Feature dimensions of the channels of the input to the Conv1D layer. input_channels (`int`, *optional*, defaults to 1): Number of input channels of the input to the Conv1D layer. conv_channels (`List[int]`, *optional*, defaults to None): Channel sizes of intermediate Conv1D layers. ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`MCTCTForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`MCTCTForCTC`]. 
Example: ```python >>> from transformers import MCTCTConfig, MCTCTModel >>> # Initializing a M-CTC-T mctct-large style configuration >>> configuration = MCTCTConfig() >>> # Initializing a model (with random weights) from the mctct-large style configuration >>> model = MCTCTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mctct" def __init__( self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.layerdrop = layerdrop self.hidden_act = hidden_act self.initializer_range = initializer_range self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.conv_glu_dim = conv_glu_dim self.conv_dropout = conv_dropout self.num_conv_layers = num_conv_layers self.input_feat_per_channel = input_feat_per_channel self.input_channels = input_channels self.conv_channels = conv_channels self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # prevents config testing fail with exporting to json self.conv_kernel = list(conv_kernel) self.conv_stride = list(conv_stride) if len(self.conv_kernel) != self.num_conv_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` " f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, " f"`config.num_conv_layers = {self.num_conv_layers}`." )
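The constructor above enforces `len(conv_kernel) == num_conv_layers`, so a mismatched convolutional configuration fails fast. A minimal sketch of that behavior (illustrative only, not part of the file; the printed message is abbreviated in the comment):

```python
from transformers import MCTCTConfig

# Defaults: num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,); stored internally as lists.
config = MCTCTConfig()
print(config.conv_kernel, config.conv_stride)  # [7] [3]

try:
    MCTCTConfig(conv_kernel=[7, 7], num_conv_layers=1)
except ValueError as err:
    print(err)  # "Configuration for convolutional module is incorrect. ..."
```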
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """M-CTC-T model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json", # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class MCTCTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MCTCTModel`]. It is used to instantiate an M-CTC-T model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the M-CTC-T [speechbrain/m-ctc-t-large](https://huggingface.co/speechbrain/m-ctc-t-large) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 8065): Vocabulary size of the M-CTC-T model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MCTCTModel`]. hidden_size (`int`, *optional*, defaults to 1536): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 36): Number of hidden layers in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 6144): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. attention_head_dim (`int`, *optional*, defaults to 384): Dimensions of each attention head for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 920): The maximum sequence length that this model might ever be used with (after log-mel spectrogram extraction). layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. layerdrop (`float`, *optional*, defaults to 0.3): The probability of dropping an encoder layer during training. The default 0.3 value is used in the original implementation. hidden_act (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. 
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. pad_token_id (`int`, *optional*, defaults to 1): The tokenizer index of the pad token. bos_token_id (`int`, *optional*, defaults to 0): The tokenizer index of the bos token. eos_token_id (`int`, *optional*, defaults to 2): The tokenizer index of the eos token. conv_glu_dim (`int`, *optional*, defaults to 1): The dimension of the output of the `Conv1dSubsampler` layer in which GLU is applied on. Though the original Flashlight code uses the value of 2, here it's adapted to 1 due to transposition differences. conv_dropout (`int`, *optional*, defaults to 0.3): The probability of randomly dropping the `Conv1dSubsampler` layer during training. num_conv_layers (`int`, *optional*, defaults to 1): Number of convolution layers before applying transformer encoder layers. conv_kernel (`List[int]`, *optional*, defaults to `[7]`): The kernel size of the 1D convolution applied before transformer layers. `len(conv_kernel)` must be equal to `num_conv_layers`. conv_stride (`List[int]`, *optional*, defaults to `[3]`): The stride length of the 1D convolution applied before transformer layers. `len(conv_stride)` must be equal to `num_conv_layers`. input_feat_per_channel (`int`, *optional*, defaults to 80): Feature dimensions of the channels of the input to the Conv1D layer. input_channels (`int`, *optional*, defaults to 1): Number of input channels of the input to the Conv1D layer. conv_channels (`List[int]`, *optional*, defaults to None): Channel sizes of intermediate Conv1D layers. ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`MCTCTForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`MCTCTForCTC`]. 
Example: ```python >>> from transformers import MCTCTConfig, MCTCTModel >>> # Initializing a M-CTC-T mctct-large style configuration >>> configuration = MCTCTConfig() >>> # Initializing a model (with random weights) from the mctct-large style configuration >>> model = MCTCTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mctct" def __init__( self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.layerdrop = layerdrop self.hidden_act = hidden_act self.initializer_range = initializer_range self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.conv_glu_dim = conv_glu_dim self.conv_dropout = conv_dropout self.num_conv_layers = num_conv_layers self.input_feat_per_channel = input_feat_per_channel self.input_channels = input_channels self.conv_channels = conv_channels self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # prevents config testing fail with exporting to json self.conv_kernel = list(conv_kernel) self.conv_stride = list(conv_stride) if len(self.conv_kernel) != self.num_conv_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` " f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, " f"`config.num_conv_layers = {self.num_conv_layers}`." )
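The tuples passed as `conv_kernel` and `conv_stride` are converted to lists in `__init__` so that the configuration stays JSON-serializable, as the inline comment notes. A hedged round-trip sketch under that assumption (not part of the original file):

```python
import json

from transformers import MCTCTConfig

config = MCTCTConfig(conv_kernel=(7,), conv_stride=(3,))
assert isinstance(config.conv_kernel, list) and isinstance(config.conv_stride, list)

# Serialize to JSON and rebuild the config from the resulting dict.
restored = MCTCTConfig.from_dict(json.loads(config.to_json_string()))
print(restored.conv_kernel, restored.conv_stride)  # [7] [3]
```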
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/mpnet/modeling_mpnet.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MPNet model.""" import math from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mpnet import MPNetConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "microsoft/mpnet-base" _CONFIG_FOR_DOC = "MPNetConfig" _TOKENIZER_FOR_DOC = "MPNetTokenizer" MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/mpnet-base", ] class MPNetPreTrainedModel(PreTrainedModel): config_class = MPNetConfig pretrained_model_archive_map = MPNET_PRETRAINED_MODEL_ARCHIVE_LIST base_model_prefix = "mpnet" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class MPNetEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.padding_idx = 1 self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx) self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, **kwargs): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = 
self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) embeddings = inputs_embeds + position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class MPNetSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.q = nn.Linear(config.hidden_size, self.all_head_size) self.k = nn.Linear(config.hidden_size, self.all_head_size) self.v = nn.Linear(config.hidden_size, self.all_head_size) self.o = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): q = self.q(hidden_states) k = self.k(hidden_states) v = self.v(hidden_states) q = self.transpose_for_scores(q) k = self.transpose_for_scores(k) v = self.transpose_for_scores(v) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(q, k.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply relative position embedding (precomputed in MPNetEncoder) if provided. if position_bias is not None: attention_scores += position_bias if attention_mask is not None: attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask c = torch.matmul(attention_probs, v) c = c.permute(0, 2, 1, 3).contiguous() new_c_shape = c.size()[:-2] + (self.all_head_size,) c = c.view(*new_c_shape) o = self.o(c) outputs = (o, attention_probs) if output_attentions else (o,) return outputs class MPNetAttention(nn.Module): def __init__(self, config): super().__init__() self.attn = MPNetSelfAttention(config) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attn.num_attention_heads, self.attn.attention_head_size, self.pruned_heads ) self.attn.q = prune_linear_layer(self.attn.q, index) self.attn.k = prune_linear_layer(self.attn.k, index) self.attn.v = prune_linear_layer(self.attn.v, index) self.attn.o = prune_linear_layer(self.attn.o, index, dim=1) self.attn.num_attention_heads = self.attn.num_attention_heads - len(heads) self.attn.all_head_size = self.attn.attention_head_size * self.attn.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): self_outputs = self.attn( hidden_states, attention_mask, head_mask, position_bias, output_attentions=output_attentions, ) attention_output = self.LayerNorm(self.dropout(self_outputs[0]) + hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class MPNetIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class MPNetOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MPNetLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = MPNetAttention(config) self.intermediate = MPNetIntermediate(config) self.output = MPNetOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, position_bias=position_bias, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights intermediate_output = 
self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + outputs return outputs class MPNetEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.n_heads = config.num_attention_heads self.layer = nn.ModuleList([MPNetLayer(config) for _ in range(config.num_hidden_layers)]) self.relative_attention_bias = nn.Embedding(config.relative_attention_num_buckets, self.n_heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = False, **kwargs, ): position_bias = self.compute_position_bias(hidden_states) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], position_bias, output_attentions=output_attentions, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, ) def compute_position_bias(self, x, position_ids=None, num_buckets=32): bsz, qlen, klen = x.size(0), x.size(1), x.size(1) if position_ids is not None: context_position = position_ids[:, :, None] memory_position = position_ids[:, None, :] else: context_position = torch.arange(qlen, dtype=torch.long)[:, None] memory_position = torch.arange(klen, dtype=torch.long)[None, :] relative_position = memory_position - context_position rp_bucket = self.relative_position_bucket(relative_position, num_buckets=num_buckets) rp_bucket = rp_bucket.to(x.device) values = self.relative_attention_bias(rp_bucket) values = values.permute([2, 0, 1]).unsqueeze(0) values = values.expand((bsz, -1, qlen, klen)).contiguous() return values @staticmethod def relative_position_bucket(relative_position, num_buckets=32, max_distance=128): ret = 0 n = -relative_position num_buckets //= 2 ret += (n < 0).to(torch.long) * num_buckets n = torch.abs(n) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret # Copied from transformers.models.bert.modeling_bert.BertPooler class MPNetPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output MPNET_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MPNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MPNET_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`MPNetTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.", MPNET_START_DOCSTRING, ) class MPNetModel(MPNetPreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = MPNetEmbeddings(config) self.encoder = MPNetEncoder(config) self.pooler = MPNetPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class MPNetForMaskedLM(MPNetPreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder"] _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.mpnet = MPNetModel(config, add_pooling_layer=False) self.lm_head = MPNetLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = 
None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MPNetLMHead(nn.Module): """MPNet Head for masked and permuted language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x @add_start_docstrings( """ MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", MPNET_START_DOCSTRING, ) class MPNetForSequenceClassification(MPNetPreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.classifier = MPNetClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", MPNET_START_DOCSTRING, ) class MPNetForMultipleChoice(MPNetPreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.mpnet = MPNetModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.mpnet( flat_input_ids, position_ids=flat_position_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", MPNET_START_DOCSTRING, ) class MPNetForTokenClassification(MPNetPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MPNetClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to BERT's [CLS] token) x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", MPNET_START_DOCSTRING, ) class MPNetForQuestionAnswering(MPNetPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def create_position_ids_from_input_ids(input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. :param torch.Tensor x: :return torch.Tensor: """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask return incremental_indices.long() + padding_idx
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MPNet model.""" import math from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mpnet import MPNetConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "microsoft/mpnet-base" _CONFIG_FOR_DOC = "MPNetConfig" _TOKENIZER_FOR_DOC = "MPNetTokenizer" MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/mpnet-base", ] class MPNetPreTrainedModel(PreTrainedModel): config_class = MPNetConfig pretrained_model_archive_map = MPNET_PRETRAINED_MODEL_ARCHIVE_LIST base_model_prefix = "mpnet" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class MPNetEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.padding_idx = 1 self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx) self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, **kwargs): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = 
self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) embeddings = inputs_embeds + position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class MPNetSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.q = nn.Linear(config.hidden_size, self.all_head_size) self.k = nn.Linear(config.hidden_size, self.all_head_size) self.v = nn.Linear(config.hidden_size, self.all_head_size) self.o = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): q = self.q(hidden_states) k = self.k(hidden_states) v = self.v(hidden_states) q = self.transpose_for_scores(q) k = self.transpose_for_scores(k) v = self.transpose_for_scores(v) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(q, k.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply relative position embedding (precomputed in MPNetEncoder) if provided. if position_bias is not None: attention_scores += position_bias if attention_mask is not None: attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask c = torch.matmul(attention_probs, v) c = c.permute(0, 2, 1, 3).contiguous() new_c_shape = c.size()[:-2] + (self.all_head_size,) c = c.view(*new_c_shape) o = self.o(c) outputs = (o, attention_probs) if output_attentions else (o,) return outputs class MPNetAttention(nn.Module): def __init__(self, config): super().__init__() self.attn = MPNetSelfAttention(config) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attn.num_attention_heads, self.attn.attention_head_size, self.pruned_heads ) self.attn.q = prune_linear_layer(self.attn.q, index) self.attn.k = prune_linear_layer(self.attn.k, index) self.attn.v = prune_linear_layer(self.attn.v, index) self.attn.o = prune_linear_layer(self.attn.o, index, dim=1) self.attn.num_attention_heads = self.attn.num_attention_heads - len(heads) self.attn.all_head_size = self.attn.attention_head_size * self.attn.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): self_outputs = self.attn( hidden_states, attention_mask, head_mask, position_bias, output_attentions=output_attentions, ) attention_output = self.LayerNorm(self.dropout(self_outputs[0]) + hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class MPNetIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class MPNetOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MPNetLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = MPNetAttention(config) self.intermediate = MPNetIntermediate(config) self.output = MPNetOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, position_bias=position_bias, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights intermediate_output = 
self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + outputs return outputs class MPNetEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.n_heads = config.num_attention_heads self.layer = nn.ModuleList([MPNetLayer(config) for _ in range(config.num_hidden_layers)]) self.relative_attention_bias = nn.Embedding(config.relative_attention_num_buckets, self.n_heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = False, **kwargs, ): position_bias = self.compute_position_bias(hidden_states) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], position_bias, output_attentions=output_attentions, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, ) def compute_position_bias(self, x, position_ids=None, num_buckets=32): bsz, qlen, klen = x.size(0), x.size(1), x.size(1) if position_ids is not None: context_position = position_ids[:, :, None] memory_position = position_ids[:, None, :] else: context_position = torch.arange(qlen, dtype=torch.long)[:, None] memory_position = torch.arange(klen, dtype=torch.long)[None, :] relative_position = memory_position - context_position rp_bucket = self.relative_position_bucket(relative_position, num_buckets=num_buckets) rp_bucket = rp_bucket.to(x.device) values = self.relative_attention_bias(rp_bucket) values = values.permute([2, 0, 1]).unsqueeze(0) values = values.expand((bsz, -1, qlen, klen)).contiguous() return values @staticmethod def relative_position_bucket(relative_position, num_buckets=32, max_distance=128): ret = 0 n = -relative_position num_buckets //= 2 ret += (n < 0).to(torch.long) * num_buckets n = torch.abs(n) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret # Copied from transformers.models.bert.modeling_bert.BertPooler class MPNetPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output MPNET_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MPNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MPNET_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`MPNetTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.", MPNET_START_DOCSTRING, ) class MPNetModel(MPNetPreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = MPNetEmbeddings(config) self.encoder = MPNetEncoder(config) self.pooler = MPNetPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class MPNetForMaskedLM(MPNetPreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder"] _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.mpnet = MPNetModel(config, add_pooling_layer=False) self.lm_head = MPNetLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = 
None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MPNetLMHead(nn.Module): """MPNet Head for masked and permuted language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x @add_start_docstrings( """ MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", MPNET_START_DOCSTRING, ) class MPNetForSequenceClassification(MPNetPreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.classifier = MPNetClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", MPNET_START_DOCSTRING, ) class MPNetForMultipleChoice(MPNetPreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.mpnet = MPNetModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.mpnet( flat_input_ids, position_ids=flat_position_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", MPNET_START_DOCSTRING, ) class MPNetForTokenClassification(MPNetPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MPNetClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to BERT's [CLS] token) x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", MPNET_START_DOCSTRING, ) class MPNetForQuestionAnswering(MPNetPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def create_position_ids_from_input_ids(input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. :param torch.Tensor x: :return torch.Tensor: """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask return incremental_indices.long() + padding_idx
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from argparse import ArgumentParser from dataclasses import dataclass from pathlib import Path from pprint import pformat from typing import Any, Dict, Iterator, List, Set, Tuple import torch import torchvision.transforms as T from PIL import Image from torch import Tensor, nn import requests from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog from detectron2.projects.deeplab import add_deeplab_config from transformers.models.maskformer.feature_extraction_maskformer import MaskFormerFeatureExtractor from transformers.models.maskformer.modeling_maskformer import ( MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerForInstanceSegmentationOutput, MaskFormerModel, MaskFormerModelOutput, ) from transformers.utils import logging StateDict = Dict[str, Tensor] logging.set_verbosity_info() logger = logging.get_logger() torch.manual_seed(0) class TrackedStateDict: def __init__(self, to_track: Dict): """This class "tracks" a python dictionary by keeping track of which item is accessed. Args: to_track (Dict): The dictionary we wish to track """ self.to_track = to_track self._seen: Set[str] = set() def __getitem__(self, key: str) -> Any: return self.to_track[key] def __setitem__(self, key: str, item: Any): self._seen.add(key) self.to_track[key] = item def diff(self) -> List[str]: """This method returns a set difference between the keys in the tracked state dict and the one we have access so far. 
This is an effective method to check if we have update all the keys Returns: List[str]: List of keys not yet updated """ return set(list(self.to_track.keys())) - self._seen def copy(self) -> Dict: # proxy the call to the internal dictionary return self.to_track.copy() # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" img_data = requests.get(url, stream=True).raw im = Image.open(img_data) return im @dataclass class Args: """Fake command line arguments needed by maskformer/detectron implementation""" config_file: str def setup_cfg(args: Args): # load config from file and command-line arguments cfg = get_cfg() add_deeplab_config(cfg) add_mask_former_config(cfg) cfg.merge_from_file(args.config_file) cfg.freeze() return cfg class OriginalMaskFormerConfigToOursConverter: def __call__(self, original_config: object) -> MaskFormerConfig: model = original_config.MODEL mask_former = model.MASK_FORMER swin = model.SWIN dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0]) id2label = {idx: label for idx, label in enumerate(dataset_catalog.stuff_classes)} label2id = {label: idx for idx, label in id2label.items()} config: MaskFormerConfig = MaskFormerConfig( fpn_feature_size=model.SEM_SEG_HEAD.CONVS_DIM, mask_feature_size=model.SEM_SEG_HEAD.MASK_DIM, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, no_object_weight=mask_former.NO_OBJECT_WEIGHT, num_queries=mask_former.NUM_OBJECT_QUERIES, backbone_config=dict( pretrain_img_size=swin.PRETRAIN_IMG_SIZE, image_size=swin.PRETRAIN_IMG_SIZE, in_channels=3, patch_size=swin.PATCH_SIZE, embed_dim=swin.EMBED_DIM, depths=swin.DEPTHS, num_heads=swin.NUM_HEADS, window_size=swin.WINDOW_SIZE, drop_path_rate=swin.DROP_PATH_RATE, model_type="swin", ), dice_weight=mask_former.DICE_WEIGHT, ce_weight=1.0, mask_weight=mask_former.MASK_WEIGHT, decoder_config=dict( model_type="detr", max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=mask_former.DEC_LAYERS, decoder_ffn_dim=mask_former.DIM_FEEDFORWARD, decoder_attention_heads=mask_former.NHEADS, encoder_layerdrop=0.0, decoder_layerdrop=0.0, d_model=mask_former.HIDDEN_DIM, dropout=mask_former.DROPOUT, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, scale_embedding=False, auxiliary_loss=False, dilation=False, # default pretrained config values ), id2label=id2label, label2id=label2id, ) return config class OriginalMaskFormerConfigToFeatureExtractorConverter: def __call__(self, original_config: object) -> MaskFormerFeatureExtractor: model = original_config.MODEL model_input = original_config.INPUT dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0]) return MaskFormerFeatureExtractor( image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(), image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(), size=model_input.MIN_SIZE_TEST, max_size=model_input.MAX_SIZE_TEST, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, ignore_index=dataset_catalog.ignore_label, size_divisibility=32, # 32 is required by swin ) class OriginalMaskFormerCheckpointToOursConverter: def __init__(self, original_model: nn.Module, config: MaskFormerConfig): self.original_model = original_model self.config = config def pop_all(self, renamed_keys: List[Tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict): for src_key, dst_key in renamed_keys: dst_state_dict[dst_key] = src_state_dict.pop(src_key) def replace_backbone(self, dst_state_dict: 
StateDict, src_state_dict: StateDict, config: MaskFormerConfig): dst_prefix: str = "pixel_level_module.encoder" src_prefix: str = "backbone" renamed_keys = [ ( f"{src_prefix}.patch_embed.proj.weight", f"{dst_prefix}.model.embeddings.patch_embeddings.projection.weight", ), (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.model.embeddings.patch_embeddings.projection.bias"), (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.model.embeddings.norm.weight"), (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.model.embeddings.norm.bias"), ] num_layers = len(config.backbone_config.depths) for layer_idx in range(num_layers): for block_idx in range(config.backbone_config.depths[layer_idx]): renamed_keys.extend( [ # src, dst ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table", ), ] ) # now we need to handle the attentions # read in weights + bias of input projection layer of cross-attention src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"] src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"] size = src_att_weight.shape[0] offset = size // 3 dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight" ] = src_att_weight[:offset, :] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias" ] = src_att_bias[:offset] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight" ] = src_att_weight[offset : offset * 2, :] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias" ] = src_att_bias[offset : offset * 2] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight" ] = src_att_weight[-offset:, :] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias" ] = src_att_bias[-offset:] # let's pop them src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight") src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias") # proj renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias", ), ] ) # second norm renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias", ), ] ) # mlp renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight", 
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias", ), ] ) renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index", ) ] ) if layer_idx < num_layers - 1: # patch merging renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.reduction.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.bias", ), ] ) # hidden states norms renamed_keys.extend( [ ( f"{src_prefix}.norm{layer_idx}.weight", f"{dst_prefix}.hidden_states_norms.{layer_idx}.weight", ), ( f"{src_prefix}.norm{layer_idx}.bias", f"{dst_prefix}.hidden_states_norms.{layer_idx}.bias", ), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "pixel_level_module.decoder" src_prefix: str = "sem_seg_head.pixel_decoder" self.replace_backbone(dst_state_dict, src_state_dict, self.config) def rename_keys_for_conv(detectron_conv: str, mine_conv: str): return [ (f"{detectron_conv}.weight", f"{mine_conv}.0.weight"), # 2 cuz the have act in the middle -> rename it (f"{detectron_conv}.norm.weight", f"{mine_conv}.1.weight"), (f"{detectron_conv}.norm.bias", f"{mine_conv}.1.bias"), ] renamed_keys = [ (f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"), (f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"), # the layers in the original one are in reverse order, stem is the last one! ] renamed_keys.extend(rename_keys_for_conv(f"{src_prefix}.layer_4", f"{dst_prefix}.fpn.stem")) # add all the fpn layers (here we need some config parameters to know the size in advance) for src_i, dst_i in zip(range(3, 0, -1), range(0, 3)): renamed_keys.extend( rename_keys_for_conv(f"{src_prefix}.adapter_{src_i}", f"{dst_prefix}.fpn.layers.{dst_i}.proj") ) renamed_keys.extend( rename_keys_for_conv(f"{src_prefix}.layer_{src_i}", f"{dst_prefix}.fpn.layers.{dst_i}.block") ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def rename_keys_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module.decoder" src_prefix: str = "sem_seg_head.predictor.transformer.decoder" # not sure why we are not popping direcetly here! 
# here we list all keys to be renamed (original name on the left, our name on the right) rename_keys = [] for i in range(self.config.decoder_config.decoder_layers): # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"{src_prefix}.layers.{i}.self_attn.out_proj.weight", f"{dst_prefix}.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( ( f"{src_prefix}.layers.{i}.self_attn.out_proj.bias", f"{dst_prefix}.layers.{i}.self_attn.out_proj.bias", ) ) rename_keys.append( ( f"{src_prefix}.layers.{i}.multihead_attn.out_proj.weight", f"{dst_prefix}.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"{src_prefix}.layers.{i}.multihead_attn.out_proj.bias", f"{dst_prefix}.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"{src_prefix}.layers.{i}.linear1.weight", f"{dst_prefix}.layers.{i}.fc1.weight")) rename_keys.append((f"{src_prefix}.layers.{i}.linear1.bias", f"{dst_prefix}.layers.{i}.fc1.bias")) rename_keys.append((f"{src_prefix}.layers.{i}.linear2.weight", f"{dst_prefix}.layers.{i}.fc2.weight")) rename_keys.append((f"{src_prefix}.layers.{i}.linear2.bias", f"{dst_prefix}.layers.{i}.fc2.bias")) rename_keys.append( (f"{src_prefix}.layers.{i}.norm1.weight", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"{src_prefix}.layers.{i}.norm1.bias", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"{src_prefix}.layers.{i}.norm2.weight", f"{dst_prefix}.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"{src_prefix}.layers.{i}.norm2.bias", f"{dst_prefix}.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append( (f"{src_prefix}.layers.{i}.norm3.weight", f"{dst_prefix}.layers.{i}.final_layer_norm.weight") ) rename_keys.append( (f"{src_prefix}.layers.{i}.norm3.bias", f"{dst_prefix}.layers.{i}.final_layer_norm.bias") ) return rename_keys def replace_q_k_v_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module.decoder" src_prefix: str = "sem_seg_head.predictor.transformer.decoder" for i in range(self.config.decoder_config.decoder_layers): # read in weights + bias of input projection layer of self-attention in_proj_weight = src_state_dict.pop(f"{src_prefix}.layers.{i}.self_attn.in_proj_weight") in_proj_bias = src_state_dict.pop(f"{src_prefix}.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention in_proj_weight_cross_attn = src_state_dict.pop(f"{src_prefix}.layers.{i}.multihead_attn.in_proj_weight") in_proj_bias_cross_attn = src_state_dict.pop(f"{src_prefix}.layers.{i}.multihead_attn.in_proj_bias") # next, add query, keys and values (in that order) of cross-attention to the state dict dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :] 
dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256] dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[ 256:512, : ] dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512] dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :] dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:] def replace_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module.decoder" src_prefix: str = "sem_seg_head.predictor.transformer.decoder" renamed_keys = self.rename_keys_in_detr_decoder(dst_state_dict, src_state_dict) # add more renamed_keys.extend( [ (f"{src_prefix}.norm.weight", f"{dst_prefix}.layernorm.weight"), (f"{src_prefix}.norm.bias", f"{dst_prefix}.layernorm.bias"), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) self.replace_q_k_v_in_detr_decoder(dst_state_dict, src_state_dict) def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module" src_prefix: str = "sem_seg_head.predictor" self.replace_detr_decoder(dst_state_dict, src_state_dict) renamed_keys = [ (f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"), (f"{src_prefix}.input_proj.weight", f"{dst_prefix}.input_projection.weight"), (f"{src_prefix}.input_proj.bias", f"{dst_prefix}.input_projection.bias"), ] self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def replace_instance_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): # NOTE in our case we don't have a prefix, thus we removed the "." from the keys later on! 
dst_prefix: str = "" src_prefix: str = "sem_seg_head.predictor" renamed_keys = [ (f"{src_prefix}.class_embed.weight", f"{dst_prefix}class_predictor.weight"), (f"{src_prefix}.class_embed.bias", f"{dst_prefix}class_predictor.bias"), ] mlp_len = 3 for i in range(mlp_len): renamed_keys.extend( [ (f"{src_prefix}.mask_embed.layers.{i}.weight", f"{dst_prefix}mask_embedder.{i}.0.weight"), (f"{src_prefix}.mask_embed.layers.{i}.bias", f"{dst_prefix}mask_embedder.{i}.0.bias"), ] ) logger.info(f"Replacing keys {pformat(renamed_keys)}") self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def convert(self, mask_former: MaskFormerModel) -> MaskFormerModel: dst_state_dict = TrackedStateDict(mask_former.state_dict()) src_state_dict = self.original_model.state_dict() self.replace_pixel_module(dst_state_dict, src_state_dict) self.replace_transformer_module(dst_state_dict, src_state_dict) logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}") logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}") logger.info("🙌 Done") mask_former.load_state_dict(dst_state_dict) return mask_former def convert_instance_segmentation( self, mask_former: MaskFormerForInstanceSegmentation ) -> MaskFormerForInstanceSegmentation: dst_state_dict = TrackedStateDict(mask_former.state_dict()) src_state_dict = self.original_model.state_dict() self.replace_instance_segmentation_module(dst_state_dict, src_state_dict) mask_former.load_state_dict(dst_state_dict) return mask_former @staticmethod def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[Tuple[object, Path, Path]]: checkpoints: List[Path] = checkpoints_dir.glob("**/*.pkl") for checkpoint in checkpoints: logger.info(f"💪 Converting {checkpoint.stem}") # find associated config file config: Path = config_dir / checkpoint.parents[0].stem / "swin" / f"{checkpoint.stem}.yaml" yield config, checkpoint def test(original_model, our_model: MaskFormerForInstanceSegmentation, feature_extractor: MaskFormerFeatureExtractor): with torch.no_grad(): original_model = original_model.eval() our_model = our_model.eval() im = prepare_img() tr = T.Compose( [ T.Resize((384, 384)), T.ToTensor(), T.Normalize( mean=torch.tensor([123.675, 116.280, 103.530]) / 255.0, std=torch.tensor([58.395, 57.120, 57.375]) / 255.0, ), ], ) x = tr(im).unsqueeze(0) original_model_backbone_features = original_model.backbone(x.clone()) our_model_output: MaskFormerModelOutput = our_model.model(x.clone(), output_hidden_states=True) for original_model_feature, our_model_feature in zip( original_model_backbone_features.values(), our_model_output.encoder_hidden_states ): assert torch.allclose( original_model_feature, our_model_feature, atol=1e-3 ), "The backbone features are not the same." original_model_pixel_out = original_model.sem_seg_head.pixel_decoder.forward_features( original_model_backbone_features ) assert torch.allclose( original_model_pixel_out[0], our_model_output.pixel_decoder_last_hidden_state, atol=1e-4 ), "The pixel decoder feature are not the same" # let's test the full model original_model_out = original_model([{"image": x.squeeze(0)}]) original_segmentation = original_model_out[0]["sem_seg"] our_model_out: MaskFormerForInstanceSegmentationOutput = our_model(x) our_segmentation = feature_extractor.post_process_segmentation(our_model_out, target_size=(384, 384)) assert torch.allclose( original_segmentation, our_segmentation, atol=1e-3 ), "The segmentation image is not the same." 
logger.info("✅ Test passed!") def get_name(checkpoint_file: Path): model_name_raw: str = checkpoint_file.stem # model_name_raw is something like maskformer_panoptic_swin_base_IN21k_384_bs64_554k parent_name: str = checkpoint_file.parents[0].stem backbone = "swin" dataset = "" if "coco" in parent_name: dataset = "coco" elif "ade" in parent_name: dataset = "ade" else: raise ValueError(f"{parent_name} must be wrong since we didn't find 'coco' or 'ade' in it ") backbone_types = ["tiny", "small", "base", "large"] backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0] model_name = f"maskformer-{backbone}-{backbone_type}-{dataset}" return model_name if __name__ == "__main__": parser = ArgumentParser( description="Command line to convert the original maskformers (with swin backbone) to our implementations." ) parser.add_argument( "--checkpoints_dir", type=Path, help=( "A directory containing the model's checkpoints. The directory has to have the following structure:" " <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.pkl" ), ) parser.add_argument( "--configs_dir", type=Path, help=( "A directory containing the model's configs, see detectron2 doc. The directory has to have the following" " structure: <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.yaml" ), ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=Path, help="Path to the folder to output PyTorch models.", ) parser.add_argument( "--maskformer_dir", required=True, type=Path, help=( "A path to MaskFormer's original implementation directory. You can download from here:" " https://github.com/facebookresearch/MaskFormer" ), ) args = parser.parse_args() checkpoints_dir: Path = args.checkpoints_dir config_dir: Path = args.configs_dir save_directory: Path = args.pytorch_dump_folder_path maskformer_dir: Path = args.maskformer_dir # append the path to the parents to maskformer dir sys.path.append(str(maskformer_dir.parent)) # and import what's needed from MaskFormer.mask_former import add_mask_former_config from MaskFormer.mask_former.mask_former_model import MaskFormer as OriginalMaskFormer if not save_directory.exists(): save_directory.mkdir(parents=True) for config_file, checkpoint_file in OriginalMaskFormerCheckpointToOursConverter.using_dirs( checkpoints_dir, config_dir ): feature_extractor = OriginalMaskFormerConfigToFeatureExtractorConverter()( setup_cfg(Args(config_file=config_file)) ) original_config = setup_cfg(Args(config_file=config_file)) mask_former_kwargs = OriginalMaskFormer.from_config(original_config) original_model = OriginalMaskFormer(**mask_former_kwargs).eval() DetectionCheckpointer(original_model).load(str(checkpoint_file)) config: MaskFormerConfig = OriginalMaskFormerConfigToOursConverter()(original_config) mask_former = MaskFormerModel(config=config).eval() converter = OriginalMaskFormerCheckpointToOursConverter(original_model, config) maskformer = converter.convert(mask_former) mask_former_for_instance_segmentation = MaskFormerForInstanceSegmentation(config=config).eval() mask_former_for_instance_segmentation.model = mask_former mask_former_for_instance_segmentation = converter.convert_instance_segmentation( mask_former_for_instance_segmentation ) test(original_model, mask_former_for_instance_segmentation, feature_extractor) model_name = get_name(checkpoint_file) logger.info(f"🪄 Saving {model_name}") feature_extractor.save_pretrained(save_directory / model_name) mask_former_for_instance_segmentation.save_pretrained(save_directory / model_name) feature_extractor.push_to_hub( 
repo_path_or_name=save_directory / model_name, commit_message="Add model", use_temp_dir=True, ) mask_former_for_instance_segmentation.push_to_hub( repo_path_or_name=save_directory / model_name, commit_message="Add model", use_temp_dir=True, )
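A minimal sketch (toy keys, and it assumes the TrackedStateDict class defined in the script above is in scope) of why the converter wraps the destination state dict: only keys written during conversion are marked as seen, so diff() surfaces any destination keys the renaming logic missed.

# Hypothetical destination state dict with three parameter keys.
reference = {"encoder.weight": 0, "encoder.bias": 1, "head.weight": 2}
tracked = TrackedStateDict(reference)

# Simulate a conversion that only copies the encoder parameters.
tracked["encoder.weight"] = 10
tracked["encoder.bias"] = 11

print(tracked.diff())  # {'head.weight'} -- never written, so it would be reported as a missed key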
# coding=utf-8 # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from argparse import ArgumentParser from dataclasses import dataclass from pathlib import Path from pprint import pformat from typing import Any, Dict, Iterator, List, Set, Tuple import torch import torchvision.transforms as T from PIL import Image from torch import Tensor, nn import requests from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog from detectron2.projects.deeplab import add_deeplab_config from transformers.models.maskformer.feature_extraction_maskformer import MaskFormerFeatureExtractor from transformers.models.maskformer.modeling_maskformer import ( MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerForInstanceSegmentationOutput, MaskFormerModel, MaskFormerModelOutput, ) from transformers.utils import logging StateDict = Dict[str, Tensor] logging.set_verbosity_info() logger = logging.get_logger() torch.manual_seed(0) class TrackedStateDict: def __init__(self, to_track: Dict): """This class "tracks" a python dictionary by keeping track of which item is accessed. Args: to_track (Dict): The dictionary we wish to track """ self.to_track = to_track self._seen: Set[str] = set() def __getitem__(self, key: str) -> Any: return self.to_track[key] def __setitem__(self, key: str, item: Any): self._seen.add(key) self.to_track[key] = item def diff(self) -> List[str]: """This method returns a set difference between the keys in the tracked state dict and the one we have access so far. 
This is an effective method to check if we have update all the keys Returns: List[str]: List of keys not yet updated """ return set(list(self.to_track.keys())) - self._seen def copy(self) -> Dict: # proxy the call to the internal dictionary return self.to_track.copy() # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" img_data = requests.get(url, stream=True).raw im = Image.open(img_data) return im @dataclass class Args: """Fake command line arguments needed by maskformer/detectron implementation""" config_file: str def setup_cfg(args: Args): # load config from file and command-line arguments cfg = get_cfg() add_deeplab_config(cfg) add_mask_former_config(cfg) cfg.merge_from_file(args.config_file) cfg.freeze() return cfg class OriginalMaskFormerConfigToOursConverter: def __call__(self, original_config: object) -> MaskFormerConfig: model = original_config.MODEL mask_former = model.MASK_FORMER swin = model.SWIN dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0]) id2label = {idx: label for idx, label in enumerate(dataset_catalog.stuff_classes)} label2id = {label: idx for idx, label in id2label.items()} config: MaskFormerConfig = MaskFormerConfig( fpn_feature_size=model.SEM_SEG_HEAD.CONVS_DIM, mask_feature_size=model.SEM_SEG_HEAD.MASK_DIM, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, no_object_weight=mask_former.NO_OBJECT_WEIGHT, num_queries=mask_former.NUM_OBJECT_QUERIES, backbone_config=dict( pretrain_img_size=swin.PRETRAIN_IMG_SIZE, image_size=swin.PRETRAIN_IMG_SIZE, in_channels=3, patch_size=swin.PATCH_SIZE, embed_dim=swin.EMBED_DIM, depths=swin.DEPTHS, num_heads=swin.NUM_HEADS, window_size=swin.WINDOW_SIZE, drop_path_rate=swin.DROP_PATH_RATE, model_type="swin", ), dice_weight=mask_former.DICE_WEIGHT, ce_weight=1.0, mask_weight=mask_former.MASK_WEIGHT, decoder_config=dict( model_type="detr", max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=mask_former.DEC_LAYERS, decoder_ffn_dim=mask_former.DIM_FEEDFORWARD, decoder_attention_heads=mask_former.NHEADS, encoder_layerdrop=0.0, decoder_layerdrop=0.0, d_model=mask_former.HIDDEN_DIM, dropout=mask_former.DROPOUT, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, scale_embedding=False, auxiliary_loss=False, dilation=False, # default pretrained config values ), id2label=id2label, label2id=label2id, ) return config class OriginalMaskFormerConfigToFeatureExtractorConverter: def __call__(self, original_config: object) -> MaskFormerFeatureExtractor: model = original_config.MODEL model_input = original_config.INPUT dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0]) return MaskFormerFeatureExtractor( image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(), image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(), size=model_input.MIN_SIZE_TEST, max_size=model_input.MAX_SIZE_TEST, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, ignore_index=dataset_catalog.ignore_label, size_divisibility=32, # 32 is required by swin ) class OriginalMaskFormerCheckpointToOursConverter: def __init__(self, original_model: nn.Module, config: MaskFormerConfig): self.original_model = original_model self.config = config def pop_all(self, renamed_keys: List[Tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict): for src_key, dst_key in renamed_keys: dst_state_dict[dst_key] = src_state_dict.pop(src_key) def replace_backbone(self, dst_state_dict: 
StateDict, src_state_dict: StateDict, config: MaskFormerConfig): dst_prefix: str = "pixel_level_module.encoder" src_prefix: str = "backbone" renamed_keys = [ ( f"{src_prefix}.patch_embed.proj.weight", f"{dst_prefix}.model.embeddings.patch_embeddings.projection.weight", ), (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.model.embeddings.patch_embeddings.projection.bias"), (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.model.embeddings.norm.weight"), (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.model.embeddings.norm.bias"), ] num_layers = len(config.backbone_config.depths) for layer_idx in range(num_layers): for block_idx in range(config.backbone_config.depths[layer_idx]): renamed_keys.extend( [ # src, dst ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table", ), ] ) # now we need to handle the attentions # read in weights + bias of input projection layer of cross-attention src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"] src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"] size = src_att_weight.shape[0] offset = size // 3 dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight" ] = src_att_weight[:offset, :] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias" ] = src_att_bias[:offset] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight" ] = src_att_weight[offset : offset * 2, :] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias" ] = src_att_bias[offset : offset * 2] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight" ] = src_att_weight[-offset:, :] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias" ] = src_att_bias[-offset:] # let's pop them src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight") src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias") # proj renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias", ), ] ) # second norm renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias", ), ] ) # mlp renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight", 
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias", ), ] ) renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index", ) ] ) if layer_idx < num_layers - 1: # patch merging renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.reduction.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.bias", ), ] ) # hidden states norms renamed_keys.extend( [ ( f"{src_prefix}.norm{layer_idx}.weight", f"{dst_prefix}.hidden_states_norms.{layer_idx}.weight", ), ( f"{src_prefix}.norm{layer_idx}.bias", f"{dst_prefix}.hidden_states_norms.{layer_idx}.bias", ), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "pixel_level_module.decoder" src_prefix: str = "sem_seg_head.pixel_decoder" self.replace_backbone(dst_state_dict, src_state_dict, self.config) def rename_keys_for_conv(detectron_conv: str, mine_conv: str): return [ (f"{detectron_conv}.weight", f"{mine_conv}.0.weight"), # 2 cuz the have act in the middle -> rename it (f"{detectron_conv}.norm.weight", f"{mine_conv}.1.weight"), (f"{detectron_conv}.norm.bias", f"{mine_conv}.1.bias"), ] renamed_keys = [ (f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"), (f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"), # the layers in the original one are in reverse order, stem is the last one! ] renamed_keys.extend(rename_keys_for_conv(f"{src_prefix}.layer_4", f"{dst_prefix}.fpn.stem")) # add all the fpn layers (here we need some config parameters to know the size in advance) for src_i, dst_i in zip(range(3, 0, -1), range(0, 3)): renamed_keys.extend( rename_keys_for_conv(f"{src_prefix}.adapter_{src_i}", f"{dst_prefix}.fpn.layers.{dst_i}.proj") ) renamed_keys.extend( rename_keys_for_conv(f"{src_prefix}.layer_{src_i}", f"{dst_prefix}.fpn.layers.{dst_i}.block") ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def rename_keys_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module.decoder" src_prefix: str = "sem_seg_head.predictor.transformer.decoder" # not sure why we are not popping direcetly here! 
# here we list all keys to be renamed (original name on the left, our name on the right) rename_keys = [] for i in range(self.config.decoder_config.decoder_layers): # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"{src_prefix}.layers.{i}.self_attn.out_proj.weight", f"{dst_prefix}.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( ( f"{src_prefix}.layers.{i}.self_attn.out_proj.bias", f"{dst_prefix}.layers.{i}.self_attn.out_proj.bias", ) ) rename_keys.append( ( f"{src_prefix}.layers.{i}.multihead_attn.out_proj.weight", f"{dst_prefix}.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"{src_prefix}.layers.{i}.multihead_attn.out_proj.bias", f"{dst_prefix}.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"{src_prefix}.layers.{i}.linear1.weight", f"{dst_prefix}.layers.{i}.fc1.weight")) rename_keys.append((f"{src_prefix}.layers.{i}.linear1.bias", f"{dst_prefix}.layers.{i}.fc1.bias")) rename_keys.append((f"{src_prefix}.layers.{i}.linear2.weight", f"{dst_prefix}.layers.{i}.fc2.weight")) rename_keys.append((f"{src_prefix}.layers.{i}.linear2.bias", f"{dst_prefix}.layers.{i}.fc2.bias")) rename_keys.append( (f"{src_prefix}.layers.{i}.norm1.weight", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"{src_prefix}.layers.{i}.norm1.bias", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"{src_prefix}.layers.{i}.norm2.weight", f"{dst_prefix}.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"{src_prefix}.layers.{i}.norm2.bias", f"{dst_prefix}.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append( (f"{src_prefix}.layers.{i}.norm3.weight", f"{dst_prefix}.layers.{i}.final_layer_norm.weight") ) rename_keys.append( (f"{src_prefix}.layers.{i}.norm3.bias", f"{dst_prefix}.layers.{i}.final_layer_norm.bias") ) return rename_keys def replace_q_k_v_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module.decoder" src_prefix: str = "sem_seg_head.predictor.transformer.decoder" for i in range(self.config.decoder_config.decoder_layers): # read in weights + bias of input projection layer of self-attention in_proj_weight = src_state_dict.pop(f"{src_prefix}.layers.{i}.self_attn.in_proj_weight") in_proj_bias = src_state_dict.pop(f"{src_prefix}.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention in_proj_weight_cross_attn = src_state_dict.pop(f"{src_prefix}.layers.{i}.multihead_attn.in_proj_weight") in_proj_bias_cross_attn = src_state_dict.pop(f"{src_prefix}.layers.{i}.multihead_attn.in_proj_bias") # next, add query, keys and values (in that order) of cross-attention to the state dict dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :] 
dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256] dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[ 256:512, : ] dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512] dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :] dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:] def replace_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module.decoder" src_prefix: str = "sem_seg_head.predictor.transformer.decoder" renamed_keys = self.rename_keys_in_detr_decoder(dst_state_dict, src_state_dict) # add more renamed_keys.extend( [ (f"{src_prefix}.norm.weight", f"{dst_prefix}.layernorm.weight"), (f"{src_prefix}.norm.bias", f"{dst_prefix}.layernorm.bias"), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) self.replace_q_k_v_in_detr_decoder(dst_state_dict, src_state_dict) def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module" src_prefix: str = "sem_seg_head.predictor" self.replace_detr_decoder(dst_state_dict, src_state_dict) renamed_keys = [ (f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"), (f"{src_prefix}.input_proj.weight", f"{dst_prefix}.input_projection.weight"), (f"{src_prefix}.input_proj.bias", f"{dst_prefix}.input_projection.bias"), ] self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def replace_instance_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): # NOTE in our case we don't have a prefix, thus we removed the "." from the keys later on! 
dst_prefix: str = "" src_prefix: str = "sem_seg_head.predictor" renamed_keys = [ (f"{src_prefix}.class_embed.weight", f"{dst_prefix}class_predictor.weight"), (f"{src_prefix}.class_embed.bias", f"{dst_prefix}class_predictor.bias"), ] mlp_len = 3 for i in range(mlp_len): renamed_keys.extend( [ (f"{src_prefix}.mask_embed.layers.{i}.weight", f"{dst_prefix}mask_embedder.{i}.0.weight"), (f"{src_prefix}.mask_embed.layers.{i}.bias", f"{dst_prefix}mask_embedder.{i}.0.bias"), ] ) logger.info(f"Replacing keys {pformat(renamed_keys)}") self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def convert(self, mask_former: MaskFormerModel) -> MaskFormerModel: dst_state_dict = TrackedStateDict(mask_former.state_dict()) src_state_dict = self.original_model.state_dict() self.replace_pixel_module(dst_state_dict, src_state_dict) self.replace_transformer_module(dst_state_dict, src_state_dict) logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}") logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}") logger.info("🙌 Done") mask_former.load_state_dict(dst_state_dict) return mask_former def convert_instance_segmentation( self, mask_former: MaskFormerForInstanceSegmentation ) -> MaskFormerForInstanceSegmentation: dst_state_dict = TrackedStateDict(mask_former.state_dict()) src_state_dict = self.original_model.state_dict() self.replace_instance_segmentation_module(dst_state_dict, src_state_dict) mask_former.load_state_dict(dst_state_dict) return mask_former @staticmethod def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[Tuple[object, Path, Path]]: checkpoints: List[Path] = checkpoints_dir.glob("**/*.pkl") for checkpoint in checkpoints: logger.info(f"💪 Converting {checkpoint.stem}") # find associated config file config: Path = config_dir / checkpoint.parents[0].stem / "swin" / f"{checkpoint.stem}.yaml" yield config, checkpoint def test(original_model, our_model: MaskFormerForInstanceSegmentation, feature_extractor: MaskFormerFeatureExtractor): with torch.no_grad(): original_model = original_model.eval() our_model = our_model.eval() im = prepare_img() tr = T.Compose( [ T.Resize((384, 384)), T.ToTensor(), T.Normalize( mean=torch.tensor([123.675, 116.280, 103.530]) / 255.0, std=torch.tensor([58.395, 57.120, 57.375]) / 255.0, ), ], ) x = tr(im).unsqueeze(0) original_model_backbone_features = original_model.backbone(x.clone()) our_model_output: MaskFormerModelOutput = our_model.model(x.clone(), output_hidden_states=True) for original_model_feature, our_model_feature in zip( original_model_backbone_features.values(), our_model_output.encoder_hidden_states ): assert torch.allclose( original_model_feature, our_model_feature, atol=1e-3 ), "The backbone features are not the same." original_model_pixel_out = original_model.sem_seg_head.pixel_decoder.forward_features( original_model_backbone_features ) assert torch.allclose( original_model_pixel_out[0], our_model_output.pixel_decoder_last_hidden_state, atol=1e-4 ), "The pixel decoder feature are not the same" # let's test the full model original_model_out = original_model([{"image": x.squeeze(0)}]) original_segmentation = original_model_out[0]["sem_seg"] our_model_out: MaskFormerForInstanceSegmentationOutput = our_model(x) our_segmentation = feature_extractor.post_process_segmentation(our_model_out, target_size=(384, 384)) assert torch.allclose( original_segmentation, our_segmentation, atol=1e-3 ), "The segmentation image is not the same." 
logger.info("✅ Test passed!") def get_name(checkpoint_file: Path): model_name_raw: str = checkpoint_file.stem # model_name_raw is something like maskformer_panoptic_swin_base_IN21k_384_bs64_554k parent_name: str = checkpoint_file.parents[0].stem backbone = "swin" dataset = "" if "coco" in parent_name: dataset = "coco" elif "ade" in parent_name: dataset = "ade" else: raise ValueError(f"{parent_name} must be wrong since we didn't find 'coco' or 'ade' in it ") backbone_types = ["tiny", "small", "base", "large"] backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0] model_name = f"maskformer-{backbone}-{backbone_type}-{dataset}" return model_name if __name__ == "__main__": parser = ArgumentParser( description="Command line to convert the original maskformers (with swin backbone) to our implementations." ) parser.add_argument( "--checkpoints_dir", type=Path, help=( "A directory containing the model's checkpoints. The directory has to have the following structure:" " <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.pkl" ), ) parser.add_argument( "--configs_dir", type=Path, help=( "A directory containing the model's configs, see detectron2 doc. The directory has to have the following" " structure: <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.yaml" ), ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=Path, help="Path to the folder to output PyTorch models.", ) parser.add_argument( "--maskformer_dir", required=True, type=Path, help=( "A path to MaskFormer's original implementation directory. You can download from here:" " https://github.com/facebookresearch/MaskFormer" ), ) args = parser.parse_args() checkpoints_dir: Path = args.checkpoints_dir config_dir: Path = args.configs_dir save_directory: Path = args.pytorch_dump_folder_path maskformer_dir: Path = args.maskformer_dir # append the path to the parents to maskformer dir sys.path.append(str(maskformer_dir.parent)) # and import what's needed from MaskFormer.mask_former import add_mask_former_config from MaskFormer.mask_former.mask_former_model import MaskFormer as OriginalMaskFormer if not save_directory.exists(): save_directory.mkdir(parents=True) for config_file, checkpoint_file in OriginalMaskFormerCheckpointToOursConverter.using_dirs( checkpoints_dir, config_dir ): feature_extractor = OriginalMaskFormerConfigToFeatureExtractorConverter()( setup_cfg(Args(config_file=config_file)) ) original_config = setup_cfg(Args(config_file=config_file)) mask_former_kwargs = OriginalMaskFormer.from_config(original_config) original_model = OriginalMaskFormer(**mask_former_kwargs).eval() DetectionCheckpointer(original_model).load(str(checkpoint_file)) config: MaskFormerConfig = OriginalMaskFormerConfigToOursConverter()(original_config) mask_former = MaskFormerModel(config=config).eval() converter = OriginalMaskFormerCheckpointToOursConverter(original_model, config) maskformer = converter.convert(mask_former) mask_former_for_instance_segmentation = MaskFormerForInstanceSegmentation(config=config).eval() mask_former_for_instance_segmentation.model = mask_former mask_former_for_instance_segmentation = converter.convert_instance_segmentation( mask_former_for_instance_segmentation ) test(original_model, mask_former_for_instance_segmentation, feature_extractor) model_name = get_name(checkpoint_file) logger.info(f"🪄 Saving {model_name}") feature_extractor.save_pretrained(save_directory / model_name) mask_former_for_instance_segmentation.save_pretrained(save_directory / model_name) feature_extractor.push_to_hub( 
            repo_path_or_name=save_directory / model_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        mask_former_for_instance_segmentation.push_to_hub(
            repo_path_or_name=save_directory / model_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
-1
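The q/k/v handling in `replace_q_k_v_in_detr_decoder` above relies on the DETR-style decoder storing its attention projections as one fused `in_proj` tensor stacked along the first dimension. Below is a minimal sketch of that slicing, assuming a hidden size of 256 as in the slices shown above; `split_in_proj` is a hypothetical helper added only for illustration and is not part of the converter.

import torch


def split_in_proj(in_proj_weight: torch.Tensor, in_proj_bias: torch.Tensor, hidden_dim: int = 256):
    # Mirror the [:256], [256:512], [-256:] slicing used for the cross-attention
    # weights above: the first hidden_dim rows are q, the next hidden_dim rows
    # are k, and the last hidden_dim rows are v.
    q = (in_proj_weight[:hidden_dim, :], in_proj_bias[:hidden_dim])
    k = (in_proj_weight[hidden_dim : 2 * hidden_dim, :], in_proj_bias[hidden_dim : 2 * hidden_dim])
    v = (in_proj_weight[-hidden_dim:, :], in_proj_bias[-hidden_dim:])
    return q, k, v


# Usage on tensors shaped like a fused attention projection (placeholder values).
weight, bias = torch.randn(3 * 256, 256), torch.randn(3 * 256)
(q_w, q_b), (k_w, k_b), (v_w, v_b) = split_in_proj(weight, bias)
assert q_w.shape == (256, 256) and k_b.shape == (256,)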
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/models/owlvit/__init__.py
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/models/cvt/test_modeling_tf_cvt.py
""" Testing suite for the Tensorflow CvT model. """ import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoFeatureExtractor class TFCvtConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "embed_dim")) self.parent.assertTrue(hasattr(config, "num_heads")) class TFCvtModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_sizes = patch_sizes self.patch_stride = patch_stride self.patch_padding = patch_padding self.is_training = is_training self.use_labels = use_labels self.num_labels = num_labels self.num_channels = num_channels self.embed_dim = embed_dim self.num_heads = num_heads self.stride_kv = stride_kv self.depth = depth self.cls_token = cls_token self.attention_drop_rate = attention_drop_rate self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: # create a random int32 tensor of given shape labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return CvtConfig( image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = TFCvtModel(config=config) result = model(pixel_values, training=False) image_size = (self.image_size, self.image_size) height, width = image_size[0], image_size[1] for i in range(len(self.depth)): height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = TFCvtForImageClassification(config) result = model(pixel_values, labels=labels, training=False) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFCvtModelTest(TFModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Cvt does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False test_onnx = False def setUp(self): self.model_tester = TFCvtModelTester(self) self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions") def test_attention_outputs(self): pass @unittest.skip(reason="Cvt does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Cvt does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_dataset_conversion(self): super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_keras_fit(self): super().test_keras_fit() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = len(self.model_tester.depth) self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_model(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFCvtModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFCvtModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return AutoFeatureExtractor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def test_inference_image_classification_head(self): model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) feature_extractor = self.default_feature_extractor image = prepare_img() inputs = feature_extractor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([0.9285, 0.9015, -0.3150]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
""" Testing suite for the Tensorflow CvT model. """ import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoFeatureExtractor class TFCvtConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "embed_dim")) self.parent.assertTrue(hasattr(config, "num_heads")) class TFCvtModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_sizes = patch_sizes self.patch_stride = patch_stride self.patch_padding = patch_padding self.is_training = is_training self.use_labels = use_labels self.num_labels = num_labels self.num_channels = num_channels self.embed_dim = embed_dim self.num_heads = num_heads self.stride_kv = stride_kv self.depth = depth self.cls_token = cls_token self.attention_drop_rate = attention_drop_rate self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: # create a random int32 tensor of given shape labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return CvtConfig( image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = TFCvtModel(config=config) result = model(pixel_values, training=False) image_size = (self.image_size, self.image_size) height, width = image_size[0], image_size[1] for i in range(len(self.depth)): height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = TFCvtForImageClassification(config) result = model(pixel_values, labels=labels, training=False) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFCvtModelTest(TFModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Cvt does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False test_onnx = False def setUp(self): self.model_tester = TFCvtModelTester(self) self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions") def test_attention_outputs(self): pass @unittest.skip(reason="Cvt does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Cvt does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_dataset_conversion(self): super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_keras_fit(self): super().test_keras_fit() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = len(self.model_tester.depth) self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_model(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFCvtModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFCvtModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return AutoFeatureExtractor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def test_inference_image_classification_head(self): model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) feature_extractor = self.default_feature_extractor image = prepare_img() inputs = feature_extractor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([0.9285, 0.9015, -0.3150]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
-1
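As context for the shape assertion in `create_and_check_model` above, the expected height and width of `last_hidden_state` come from applying the standard convolution output-size formula once per CvT stage. The sketch below uses the tester defaults shown above (image_size=64, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1]); the helper is illustrative and not taken from the modeling code.

from math import floor


def conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    # floor((size + 2 * padding - kernel) / stride + 1), as computed in the test above.
    return floor((size + 2 * padding - kernel) / stride + 1)


size = 64
for kernel, stride, padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = conv_output_size(size, kernel, stride, padding)
    print(size)  # 16, then 8, then 4

# With embed_dim=[16, 48, 96] and batch_size=13, the asserted shape is (13, 96, 4, 4).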
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./examples/research_projects/distillation/distiller.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The distiller to distil the student. Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import math import os import time import psutil import torch from torch import nn from torch.optim import AdamW from torch.utils.data import BatchSampler, DataLoader, RandomSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups from lm_seqs_dataset import LmSeqsDataset from transformers import get_linear_schedule_with_warmup from utils import logger try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter class Distiller: def __init__( self, params: dict, dataset: LmSeqsDataset, token_probs: torch.tensor, student: nn.Module, teacher: nn.Module ): logger.info("Initializing Distiller") self.params = params self.dump_path = params.dump_path self.multi_gpu = params.multi_gpu self.fp16 = params.fp16 self.student = student self.teacher = teacher self.student_config = student.config self.vocab_size = student.config.vocab_size if params.n_gpu <= 1: sampler = RandomSampler(dataset) else: sampler = DistributedSampler(dataset) if params.group_by_size: groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size) sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size) else: sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False) self.dataloader = DataLoader(dataset=dataset, batch_sampler=sampler, collate_fn=dataset.batch_sequences) self.temperature = params.temperature assert self.temperature > 0.0 self.alpha_ce = params.alpha_ce self.alpha_mlm = params.alpha_mlm self.alpha_clm = params.alpha_clm self.alpha_mse = params.alpha_mse self.alpha_cos = params.alpha_cos self.mlm = params.mlm if self.mlm: logger.info("Using MLM loss for LM step.") self.mlm_mask_prop = params.mlm_mask_prop assert 0.0 <= self.mlm_mask_prop <= 1.0 assert params.word_mask + params.word_keep + params.word_rand == 1.0 self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand]) self.pred_probs = self.pred_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else self.pred_probs self.token_probs = token_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else token_probs if self.fp16: self.pred_probs = self.pred_probs.half() self.token_probs = self.token_probs.half() else: logger.info("Using CLM loss for LM step.") self.epoch = 0 self.n_iter = 0 self.n_total_iter = 0 self.n_sequences_epoch = 0 self.total_loss_epoch = 0 self.last_loss = 0 self.last_loss_ce = 0 self.last_loss_mlm = 0 self.last_loss_clm = 0 if self.alpha_mse > 0.0: self.last_loss_mse = 0 if self.alpha_cos > 0.0: self.last_loss_cos = 0 self.last_log = 0 self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean") self.lm_loss_fct 
= nn.CrossEntropyLoss(ignore_index=-100) if self.alpha_mse > 0.0: self.mse_loss_fct = nn.MSELoss(reduction="sum") if self.alpha_cos > 0.0: self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction="mean") logger.info("--- Initializing model optimizer") assert params.gradient_accumulation_steps >= 1 self.num_steps_epoch = len(self.dataloader) num_train_optimization_steps = ( int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1 ) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad ], "weight_decay": params.weight_decay, }, { "params": [ p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad ], "weight_decay": 0.0, }, ] logger.info( "------ Number of trainable parameters (student): %i" % sum([p.numel() for p in self.student.parameters() if p.requires_grad]) ) logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()])) self.optimizer = AdamW( optimizer_grouped_parameters, lr=params.learning_rate, eps=params.adam_epsilon, betas=(0.9, 0.98) ) warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop) self.scheduler = get_linear_schedule_with_warmup( self.optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps ) if self.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level") self.student, self.optimizer = amp.initialize( self.student, self.optimizer, opt_level=self.params.fp16_opt_level ) self.teacher = self.teacher.half() if self.multi_gpu: if self.fp16: from apex.parallel import DistributedDataParallel logger.info("Using apex.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel(self.student) else: from torch.nn.parallel import DistributedDataParallel logger.info("Using nn.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel( self.student, device_ids=[params.local_rank], output_device=params.local_rank, find_unused_parameters=True, ) self.is_master = params.is_master if self.is_master: logger.info("--- Initializing Tensorboard") self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, "log", "train")) self.tensorboard.add_text(tag="config/training", text_string=str(self.params), global_step=0) self.tensorboard.add_text(tag="config/student", text_string=str(self.student_config), global_step=0) def prepare_batch_mlm(self, batch): """ Prepare the batch: from the token_ids and the lengths, compute the attention mask and the masked label for MLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. mlm_labels: `torch.tensor(bs, seq_length)` - The masked language modeling labels. There is a -100 where there is nothing to predict. 
""" token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] bs, max_seq_len = token_ids.size() mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids) x_prob = self.token_probs[token_ids.flatten()] n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item()) tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False) pred_mask = torch.zeros( bs * max_seq_len, dtype=torch.bool, device=token_ids.device ) # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility pred_mask[tgt_ids] = 1 pred_mask = pred_mask.view(bs, max_seq_len) pred_mask[token_ids == self.params.special_tok_ids["pad_token"]] = 0 # mask a number of words == 0 [8] (faster with fp16) if self.fp16: n1 = pred_mask.sum().item() if n1 > 8: pred_mask = pred_mask.view(-1) n2 = max(n1 % 8, 8 * (n1 // 8)) if n2 != n1: pred_mask[torch.nonzero(pred_mask).view(-1)[: n1 - n2]] = 0 pred_mask = pred_mask.view(bs, max_seq_len) assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item() _token_ids_real = token_ids[pred_mask] _token_ids_rand = _token_ids_real.clone().random_(self.vocab_size) _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids["mask_token"]) probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True) _token_ids = ( _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long() ) token_ids = token_ids.masked_scatter(pred_mask, _token_ids) mlm_labels[~pred_mask] = -100 # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, mlm_labels def prepare_batch_clm(self, batch): """ Prepare the batch: from the token_ids and the lengths, compute the attention mask and the labels for CLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. clm_labels: `torch.tensor(bs, seq_length)` - The causal language modeling labels. There is a -100 where there is nothing to predict. """ token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] clm_labels = token_ids.new(token_ids.size()).copy_(token_ids) clm_labels[~attn_mask] = -100 # previously `clm_labels[1-attn_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, clm_labels def round_batch(self, x: torch.tensor, lengths: torch.tensor): """ For float16 only. Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8. Input: ------ x: `torch.tensor(bs, seq_length)` - The token ids. lengths: `torch.tensor(bs, seq_length)` - The lengths of each of the sequence in the batch. Output: ------- x: `torch.tensor(new_bs, new_seq_length)` - The updated token ids. 
lengths: `torch.tensor(new_bs, new_seq_length)` - The updated lengths. """ if not self.fp16 or len(lengths) < 8: return x, lengths # number of sentences == 0 [8] bs1 = len(lengths) bs2 = 8 * (bs1 // 8) assert bs2 > 0 and bs2 % 8 == 0 if bs1 != bs2: idx = torch.randperm(bs1)[:bs2] lengths = lengths[idx] slen = lengths.max().item() x = x[idx, :slen] else: idx = None # sequence length == 0 [8] ml1 = x.size(1) if ml1 % 8 != 0: pad = 8 - (ml1 % 8) ml2 = ml1 + pad if self.mlm: pad_id = self.params.special_tok_ids["pad_token"] else: pad_id = self.params.special_tok_ids["unk_token"] padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id) x = torch.cat([x, padding_tensor], 1) assert x.size() == (bs2, ml2) assert x.size(0) % 8 == 0 assert x.size(1) % 8 == 0 return x, lengths def train(self): """ The real training loop. """ if self.is_master: logger.info("Starting training") self.last_log = time.time() self.student.train() self.teacher.eval() for _ in range(self.params.n_epoch): if self.is_master: logger.info(f"--- Starting epoch {self.epoch}/{self.params.n_epoch-1}") if self.multi_gpu: torch.distributed.barrier() iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0]) for batch in iter_bar: if self.params.n_gpu > 0: batch = tuple(t.to(f"cuda:{self.params.local_rank}") for t in batch) if self.mlm: token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch) else: token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch) self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels) iter_bar.update() iter_bar.set_postfix( {"Last_loss": f"{self.last_loss:.2f}", "Avg_cum_loss": f"{self.total_loss_epoch/self.n_iter:.2f}"} ) iter_bar.close() if self.is_master: logger.info(f"--- Ending epoch {self.epoch}/{self.params.n_epoch-1}") self.end_epoch() if self.is_master: logger.info("Save very last checkpoint as `pytorch_model.bin`.") self.save_checkpoint(checkpoint_name="pytorch_model.bin") logger.info("Training is finished") def step(self, input_ids: torch.tensor, attention_mask: torch.tensor, lm_labels: torch.tensor): """ One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation), and possibly a parameter update (depending on the gradient accumulation). Input: ------ input_ids: `torch.tensor(bs, seq_length)` - The token ids. attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention. lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM). 
""" if self.mlm: student_outputs = self.student( input_ids=input_ids, attention_mask=attention_mask ) # (bs, seq_length, voc_size) with torch.no_grad(): teacher_outputs = self.teacher( input_ids=input_ids, attention_mask=attention_mask ) # (bs, seq_length, voc_size) else: student_outputs = self.student(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) with torch.no_grad(): teacher_outputs = self.teacher(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) s_logits, s_hidden_states = student_outputs["logits"], student_outputs["hidden_states"] t_logits, t_hidden_states = teacher_outputs["logits"], teacher_outputs["hidden_states"] assert s_logits.size() == t_logits.size() # https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100 # https://github.com/peterliht/knowledge-distillation-pytorch/issues/2 if self.params.restrict_ce_to_mask: mask = (lm_labels > -1).unsqueeze(-1).expand_as(s_logits) # (bs, seq_length, voc_size) else: mask = attention_mask.unsqueeze(-1).expand_as(s_logits) # (bs, seq_length, voc_size) s_logits_slct = torch.masked_select(s_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask t_logits_slct = torch.masked_select(t_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask assert t_logits_slct.size() == s_logits_slct.size() loss_ce = ( self.ce_loss_fct( nn.functional.log_softmax(s_logits_slct / self.temperature, dim=-1), nn.functional.softmax(t_logits_slct / self.temperature, dim=-1), ) * (self.temperature) ** 2 ) loss = self.alpha_ce * loss_ce if self.alpha_mlm > 0.0: loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1)) loss += self.alpha_mlm * loss_mlm if self.alpha_clm > 0.0: shift_logits = s_logits[..., :-1, :].contiguous() shift_labels = lm_labels[..., 1:].contiguous() loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) loss += self.alpha_clm * loss_clm if self.alpha_mse > 0.0: loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct) / s_logits_slct.size( 0 ) # Reproducing batchmean reduction loss += self.alpha_mse * loss_mse if self.alpha_cos > 0.0: s_hidden_states = s_hidden_states[-1] # (bs, seq_length, dim) t_hidden_states = t_hidden_states[-1] # (bs, seq_length, dim) mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states) # (bs, seq_length, dim) assert s_hidden_states.size() == t_hidden_states.size() dim = s_hidden_states.size(-1) s_hidden_states_slct = torch.masked_select(s_hidden_states, mask) # (bs * seq_length * dim) s_hidden_states_slct = s_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) t_hidden_states_slct = torch.masked_select(t_hidden_states, mask) # (bs * seq_length * dim) t_hidden_states_slct = t_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1) # (bs * seq_length,) loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target) loss += self.alpha_cos * loss_cos self.total_loss_epoch += loss.item() self.last_loss = loss.item() self.last_loss_ce = loss_ce.item() if self.alpha_mlm > 0.0: self.last_loss_mlm = loss_mlm.item() if self.alpha_clm > 0.0: self.last_loss_clm = loss_clm.item() if self.alpha_mse > 0.0: self.last_loss_mse = loss_mse.item() if 
self.alpha_cos > 0.0: self.last_loss_cos = loss_cos.item() self.optimize(loss) self.n_sequences_epoch += input_ids.size(0) def optimize(self, loss): """ Normalization on the loss (gradient accumulation or distributed training), followed by backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation). Also update the metrics for tensorboard. """ # Check for NaN if (loss != loss).data.any(): logger.error("NaN detected") exit() if self.multi_gpu: loss = loss.mean() if self.params.gradient_accumulation_steps > 1: loss = loss / self.params.gradient_accumulation_steps if self.fp16: from apex import amp with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() self.iter() if self.n_iter % self.params.gradient_accumulation_steps == 0: if self.fp16: nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm) else: nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm) self.optimizer.step() self.optimizer.zero_grad() self.scheduler.step() def iter(self): """ Update global counts, write to tensorboard and save checkpoint. """ self.n_iter += 1 self.n_total_iter += 1 if self.n_total_iter % self.params.log_interval == 0: self.log_tensorboard() self.last_log = time.time() if self.n_total_iter % self.params.checkpoint_interval == 0: self.save_checkpoint() def log_tensorboard(self): """ Log into tensorboard. Only by the master process. """ if not self.is_master: return for param_name, param in self.student.named_parameters(): self.tensorboard.add_scalar( tag="parameter_mean/" + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="parameter_std/" + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter ) if param.grad is None: continue self.tensorboard.add_scalar( tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="losses/cum_avg_loss_epoch", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.n_total_iter, ) self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter) self.tensorboard.add_scalar( tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter ) if self.alpha_mlm > 0.0: self.tensorboard.add_scalar( tag="losses/loss_mlm", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter ) if self.alpha_clm > 0.0: self.tensorboard.add_scalar( tag="losses/loss_clm", scalar_value=self.last_loss_clm, global_step=self.n_total_iter ) if self.alpha_mse > 0.0: self.tensorboard.add_scalar( tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter ) if self.alpha_cos > 0.0: self.tensorboard.add_scalar( tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="global/memory_usage", scalar_value=psutil.virtual_memory()._asdict()["used"] / 1_000_000, global_step=self.n_total_iter, ) self.tensorboard.add_scalar( tag="global/speed", scalar_value=time.time() - self.last_log, global_step=self.n_total_iter ) def end_epoch(self): """ Finally arrived at the end of epoch (full pass on dataset). 
Do some tensorboard logging and checkpoint saving. """ logger.info(f"{self.n_sequences_epoch} sequences have been trained during this epoch.") if self.is_master: self.save_checkpoint(checkpoint_name=f"model_epoch_{self.epoch}.pth") self.tensorboard.add_scalar( tag="epoch/loss", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.epoch ) self.epoch += 1 self.n_sequences_epoch = 0 self.n_iter = 0 self.total_loss_epoch = 0 def save_checkpoint(self, checkpoint_name: str = "checkpoint.pth"): """ Save the current state. Only by the master process. """ if not self.is_master: return mdl_to_save = self.student.module if hasattr(self.student, "module") else self.student mdl_to_save.config.save_pretrained(self.dump_path) state_dict = mdl_to_save.state_dict() torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The distiller to distil the student. Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import math import os import time import psutil import torch from torch import nn from torch.optim import AdamW from torch.utils.data import BatchSampler, DataLoader, RandomSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups from lm_seqs_dataset import LmSeqsDataset from transformers import get_linear_schedule_with_warmup from utils import logger try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter class Distiller: def __init__( self, params: dict, dataset: LmSeqsDataset, token_probs: torch.tensor, student: nn.Module, teacher: nn.Module ): logger.info("Initializing Distiller") self.params = params self.dump_path = params.dump_path self.multi_gpu = params.multi_gpu self.fp16 = params.fp16 self.student = student self.teacher = teacher self.student_config = student.config self.vocab_size = student.config.vocab_size if params.n_gpu <= 1: sampler = RandomSampler(dataset) else: sampler = DistributedSampler(dataset) if params.group_by_size: groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size) sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size) else: sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False) self.dataloader = DataLoader(dataset=dataset, batch_sampler=sampler, collate_fn=dataset.batch_sequences) self.temperature = params.temperature assert self.temperature > 0.0 self.alpha_ce = params.alpha_ce self.alpha_mlm = params.alpha_mlm self.alpha_clm = params.alpha_clm self.alpha_mse = params.alpha_mse self.alpha_cos = params.alpha_cos self.mlm = params.mlm if self.mlm: logger.info("Using MLM loss for LM step.") self.mlm_mask_prop = params.mlm_mask_prop assert 0.0 <= self.mlm_mask_prop <= 1.0 assert params.word_mask + params.word_keep + params.word_rand == 1.0 self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand]) self.pred_probs = self.pred_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else self.pred_probs self.token_probs = token_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else token_probs if self.fp16: self.pred_probs = self.pred_probs.half() self.token_probs = self.token_probs.half() else: logger.info("Using CLM loss for LM step.") self.epoch = 0 self.n_iter = 0 self.n_total_iter = 0 self.n_sequences_epoch = 0 self.total_loss_epoch = 0 self.last_loss = 0 self.last_loss_ce = 0 self.last_loss_mlm = 0 self.last_loss_clm = 0 if self.alpha_mse > 0.0: self.last_loss_mse = 0 if self.alpha_cos > 0.0: self.last_loss_cos = 0 self.last_log = 0 self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean") self.lm_loss_fct 
= nn.CrossEntropyLoss(ignore_index=-100) if self.alpha_mse > 0.0: self.mse_loss_fct = nn.MSELoss(reduction="sum") if self.alpha_cos > 0.0: self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction="mean") logger.info("--- Initializing model optimizer") assert params.gradient_accumulation_steps >= 1 self.num_steps_epoch = len(self.dataloader) num_train_optimization_steps = ( int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1 ) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad ], "weight_decay": params.weight_decay, }, { "params": [ p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad ], "weight_decay": 0.0, }, ] logger.info( "------ Number of trainable parameters (student): %i" % sum([p.numel() for p in self.student.parameters() if p.requires_grad]) ) logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()])) self.optimizer = AdamW( optimizer_grouped_parameters, lr=params.learning_rate, eps=params.adam_epsilon, betas=(0.9, 0.98) ) warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop) self.scheduler = get_linear_schedule_with_warmup( self.optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps ) if self.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level") self.student, self.optimizer = amp.initialize( self.student, self.optimizer, opt_level=self.params.fp16_opt_level ) self.teacher = self.teacher.half() if self.multi_gpu: if self.fp16: from apex.parallel import DistributedDataParallel logger.info("Using apex.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel(self.student) else: from torch.nn.parallel import DistributedDataParallel logger.info("Using nn.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel( self.student, device_ids=[params.local_rank], output_device=params.local_rank, find_unused_parameters=True, ) self.is_master = params.is_master if self.is_master: logger.info("--- Initializing Tensorboard") self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, "log", "train")) self.tensorboard.add_text(tag="config/training", text_string=str(self.params), global_step=0) self.tensorboard.add_text(tag="config/student", text_string=str(self.student_config), global_step=0) def prepare_batch_mlm(self, batch): """ Prepare the batch: from the token_ids and the lengths, compute the attention mask and the masked label for MLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. mlm_labels: `torch.tensor(bs, seq_length)` - The masked language modeling labels. There is a -100 where there is nothing to predict. 
""" token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] bs, max_seq_len = token_ids.size() mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids) x_prob = self.token_probs[token_ids.flatten()] n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item()) tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False) pred_mask = torch.zeros( bs * max_seq_len, dtype=torch.bool, device=token_ids.device ) # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility pred_mask[tgt_ids] = 1 pred_mask = pred_mask.view(bs, max_seq_len) pred_mask[token_ids == self.params.special_tok_ids["pad_token"]] = 0 # mask a number of words == 0 [8] (faster with fp16) if self.fp16: n1 = pred_mask.sum().item() if n1 > 8: pred_mask = pred_mask.view(-1) n2 = max(n1 % 8, 8 * (n1 // 8)) if n2 != n1: pred_mask[torch.nonzero(pred_mask).view(-1)[: n1 - n2]] = 0 pred_mask = pred_mask.view(bs, max_seq_len) assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item() _token_ids_real = token_ids[pred_mask] _token_ids_rand = _token_ids_real.clone().random_(self.vocab_size) _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids["mask_token"]) probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True) _token_ids = ( _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long() ) token_ids = token_ids.masked_scatter(pred_mask, _token_ids) mlm_labels[~pred_mask] = -100 # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, mlm_labels def prepare_batch_clm(self, batch): """ Prepare the batch: from the token_ids and the lengths, compute the attention mask and the labels for CLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. clm_labels: `torch.tensor(bs, seq_length)` - The causal language modeling labels. There is a -100 where there is nothing to predict. """ token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] clm_labels = token_ids.new(token_ids.size()).copy_(token_ids) clm_labels[~attn_mask] = -100 # previously `clm_labels[1-attn_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, clm_labels def round_batch(self, x: torch.tensor, lengths: torch.tensor): """ For float16 only. Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8. Input: ------ x: `torch.tensor(bs, seq_length)` - The token ids. lengths: `torch.tensor(bs, seq_length)` - The lengths of each of the sequence in the batch. Output: ------- x: `torch.tensor(new_bs, new_seq_length)` - The updated token ids. 
lengths: `torch.tensor(new_bs, new_seq_length)` - The updated lengths. """ if not self.fp16 or len(lengths) < 8: return x, lengths # number of sentences == 0 [8] bs1 = len(lengths) bs2 = 8 * (bs1 // 8) assert bs2 > 0 and bs2 % 8 == 0 if bs1 != bs2: idx = torch.randperm(bs1)[:bs2] lengths = lengths[idx] slen = lengths.max().item() x = x[idx, :slen] else: idx = None # sequence length == 0 [8] ml1 = x.size(1) if ml1 % 8 != 0: pad = 8 - (ml1 % 8) ml2 = ml1 + pad if self.mlm: pad_id = self.params.special_tok_ids["pad_token"] else: pad_id = self.params.special_tok_ids["unk_token"] padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id) x = torch.cat([x, padding_tensor], 1) assert x.size() == (bs2, ml2) assert x.size(0) % 8 == 0 assert x.size(1) % 8 == 0 return x, lengths def train(self): """ The real training loop. """ if self.is_master: logger.info("Starting training") self.last_log = time.time() self.student.train() self.teacher.eval() for _ in range(self.params.n_epoch): if self.is_master: logger.info(f"--- Starting epoch {self.epoch}/{self.params.n_epoch-1}") if self.multi_gpu: torch.distributed.barrier() iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0]) for batch in iter_bar: if self.params.n_gpu > 0: batch = tuple(t.to(f"cuda:{self.params.local_rank}") for t in batch) if self.mlm: token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch) else: token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch) self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels) iter_bar.update() iter_bar.set_postfix( {"Last_loss": f"{self.last_loss:.2f}", "Avg_cum_loss": f"{self.total_loss_epoch/self.n_iter:.2f}"} ) iter_bar.close() if self.is_master: logger.info(f"--- Ending epoch {self.epoch}/{self.params.n_epoch-1}") self.end_epoch() if self.is_master: logger.info("Save very last checkpoint as `pytorch_model.bin`.") self.save_checkpoint(checkpoint_name="pytorch_model.bin") logger.info("Training is finished") def step(self, input_ids: torch.tensor, attention_mask: torch.tensor, lm_labels: torch.tensor): """ One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation), and possibly a parameter update (depending on the gradient accumulation). Input: ------ input_ids: `torch.tensor(bs, seq_length)` - The token ids. attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention. lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM). 
""" if self.mlm: student_outputs = self.student( input_ids=input_ids, attention_mask=attention_mask ) # (bs, seq_length, voc_size) with torch.no_grad(): teacher_outputs = self.teacher( input_ids=input_ids, attention_mask=attention_mask ) # (bs, seq_length, voc_size) else: student_outputs = self.student(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) with torch.no_grad(): teacher_outputs = self.teacher(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) s_logits, s_hidden_states = student_outputs["logits"], student_outputs["hidden_states"] t_logits, t_hidden_states = teacher_outputs["logits"], teacher_outputs["hidden_states"] assert s_logits.size() == t_logits.size() # https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100 # https://github.com/peterliht/knowledge-distillation-pytorch/issues/2 if self.params.restrict_ce_to_mask: mask = (lm_labels > -1).unsqueeze(-1).expand_as(s_logits) # (bs, seq_length, voc_size) else: mask = attention_mask.unsqueeze(-1).expand_as(s_logits) # (bs, seq_length, voc_size) s_logits_slct = torch.masked_select(s_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask t_logits_slct = torch.masked_select(t_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask assert t_logits_slct.size() == s_logits_slct.size() loss_ce = ( self.ce_loss_fct( nn.functional.log_softmax(s_logits_slct / self.temperature, dim=-1), nn.functional.softmax(t_logits_slct / self.temperature, dim=-1), ) * (self.temperature) ** 2 ) loss = self.alpha_ce * loss_ce if self.alpha_mlm > 0.0: loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1)) loss += self.alpha_mlm * loss_mlm if self.alpha_clm > 0.0: shift_logits = s_logits[..., :-1, :].contiguous() shift_labels = lm_labels[..., 1:].contiguous() loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) loss += self.alpha_clm * loss_clm if self.alpha_mse > 0.0: loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct) / s_logits_slct.size( 0 ) # Reproducing batchmean reduction loss += self.alpha_mse * loss_mse if self.alpha_cos > 0.0: s_hidden_states = s_hidden_states[-1] # (bs, seq_length, dim) t_hidden_states = t_hidden_states[-1] # (bs, seq_length, dim) mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states) # (bs, seq_length, dim) assert s_hidden_states.size() == t_hidden_states.size() dim = s_hidden_states.size(-1) s_hidden_states_slct = torch.masked_select(s_hidden_states, mask) # (bs * seq_length * dim) s_hidden_states_slct = s_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) t_hidden_states_slct = torch.masked_select(t_hidden_states, mask) # (bs * seq_length * dim) t_hidden_states_slct = t_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1) # (bs * seq_length,) loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target) loss += self.alpha_cos * loss_cos self.total_loss_epoch += loss.item() self.last_loss = loss.item() self.last_loss_ce = loss_ce.item() if self.alpha_mlm > 0.0: self.last_loss_mlm = loss_mlm.item() if self.alpha_clm > 0.0: self.last_loss_clm = loss_clm.item() if self.alpha_mse > 0.0: self.last_loss_mse = loss_mse.item() if 
self.alpha_cos > 0.0: self.last_loss_cos = loss_cos.item() self.optimize(loss) self.n_sequences_epoch += input_ids.size(0) def optimize(self, loss): """ Normalization on the loss (gradient accumulation or distributed training), followed by backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation). Also update the metrics for tensorboard. """ # Check for NaN if (loss != loss).data.any(): logger.error("NaN detected") exit() if self.multi_gpu: loss = loss.mean() if self.params.gradient_accumulation_steps > 1: loss = loss / self.params.gradient_accumulation_steps if self.fp16: from apex import amp with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() self.iter() if self.n_iter % self.params.gradient_accumulation_steps == 0: if self.fp16: nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm) else: nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm) self.optimizer.step() self.optimizer.zero_grad() self.scheduler.step() def iter(self): """ Update global counts, write to tensorboard and save checkpoint. """ self.n_iter += 1 self.n_total_iter += 1 if self.n_total_iter % self.params.log_interval == 0: self.log_tensorboard() self.last_log = time.time() if self.n_total_iter % self.params.checkpoint_interval == 0: self.save_checkpoint() def log_tensorboard(self): """ Log into tensorboard. Only by the master process. """ if not self.is_master: return for param_name, param in self.student.named_parameters(): self.tensorboard.add_scalar( tag="parameter_mean/" + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="parameter_std/" + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter ) if param.grad is None: continue self.tensorboard.add_scalar( tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="losses/cum_avg_loss_epoch", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.n_total_iter, ) self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter) self.tensorboard.add_scalar( tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter ) if self.alpha_mlm > 0.0: self.tensorboard.add_scalar( tag="losses/loss_mlm", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter ) if self.alpha_clm > 0.0: self.tensorboard.add_scalar( tag="losses/loss_clm", scalar_value=self.last_loss_clm, global_step=self.n_total_iter ) if self.alpha_mse > 0.0: self.tensorboard.add_scalar( tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter ) if self.alpha_cos > 0.0: self.tensorboard.add_scalar( tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="global/memory_usage", scalar_value=psutil.virtual_memory()._asdict()["used"] / 1_000_000, global_step=self.n_total_iter, ) self.tensorboard.add_scalar( tag="global/speed", scalar_value=time.time() - self.last_log, global_step=self.n_total_iter ) def end_epoch(self): """ Finally arrived at the end of epoch (full pass on dataset). 
Do some tensorboard logging and checkpoint saving. """ logger.info(f"{self.n_sequences_epoch} sequences have been trained during this epoch.") if self.is_master: self.save_checkpoint(checkpoint_name=f"model_epoch_{self.epoch}.pth") self.tensorboard.add_scalar( tag="epoch/loss", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.epoch ) self.epoch += 1 self.n_sequences_epoch = 0 self.n_iter = 0 self.total_loss_epoch = 0 def save_checkpoint(self, checkpoint_name: str = "checkpoint.pth"): """ Save the current state. Only by the master process. """ if not self.is_master: return mdl_to_save = self.student.module if hasattr(self.student, "module") else self.student mdl_to_save.config.save_pretrained(self.dump_path) state_dict = mdl_to_save.state_dict() torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))
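For orientation, the core of the `step` method above is the temperature-scaled distillation term. The snippet below reproduces just that computation on toy tensors; the loss function, temperature, and weighting are assumptions standing in for whatever the surrounding training script configures (`self.ce_loss_fct`, `self.temperature`, `self.alpha_ce`), not values taken from this record.

import torch
from torch import nn

temperature = 2.0  # assumed softening temperature
alpha_ce = 5.0     # assumed weight of the distillation term

# The distiller builds its CE loss elsewhere; KLDivLoss with "batchmean" reduction
# is a common choice for this soft-target objective.
ce_loss_fct = nn.KLDivLoss(reduction="batchmean")

# Toy student/teacher logits for 4 selected token positions over a 10-word vocabulary.
s_logits_slct = torch.randn(4, 10)
t_logits_slct = torch.randn(4, 10)

loss_ce = (
    ce_loss_fct(
        nn.functional.log_softmax(s_logits_slct / temperature, dim=-1),
        nn.functional.softmax(t_logits_slct / temperature, dim=-1),
    )
    * temperature**2
)
loss = alpha_ce * loss_ce  # further terms (mlm/clm, mse, cosine) are added the same way
print(loss.item())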
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/fixtures/vocab.json
{"l": 0, "o": 1, "w": 2, "e": 3, "r": 4, "s": 5, "t": 6, "i": 7, "d": 8, "n": 9, "Ġ": 10, "Ġl": 11, "Ġn": 12, "Ġlo": 13, "Ġlow": 14, "er": 15, "Ġlowest": 16, "Ġnewer": 17, "Ġwider": 18, "<unk>": 19, "<|endoftext|>": 20}
{"l": 0, "o": 1, "w": 2, "e": 3, "r": 4, "s": 5, "t": 6, "i": 7, "d": 8, "n": 9, "Ġ": 10, "Ġl": 11, "Ġn": 12, "Ġlo": 13, "Ġlow": 14, "er": 15, "Ġlowest": 16, "Ġnewer": 17, "Ġwider": 18, "<unk>": 19, "<|endoftext|>": 20}
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert T5/LongT5X checkpoints from the original repository to JAX/FLAX model. This script is an extension of 'src/transformers/models/t5/convert_t5x_checkpoint_to_flax. """ import argparse from t5x import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path): config = AutoConfig.from_pretrained(config_name) flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config) t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"] if config.model_type == "t5": encoder_attn_name = "SelfAttention" if config.model_type == "longt5" and config.encoder_attention_type == "local": encoder_attn_name = "LocalSelfAttention" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": encoder_attn_name = "TransientGlobalSelfAttention" else: raise ValueError( "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`" " attribute with a value from ['local', 'transient-global]." ) # Encoder for layer_index in range(config.num_layers): layer_name = f"layers_{str(layer_index)}" # Self-Attention t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"] t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"] t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"] t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"] # Layer Normalization t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"] if split_mlp_wi: t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"] t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"] t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"] flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value 
flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][ "weight" ] = t5x_global_layer_norm if split_mlp_wi: flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0 flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1 else: flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block # Only for layer 0: t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][ "embedding" ] = t5x_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][ "embedding" ] = t5x_encoder_global_rel_embedding # Assigning t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"] flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm # Decoder for layer_index in range(config.num_layers): layer_name = f"layers_{str(layer_index)}" # Self-Attention t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"] t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"] t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"] t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"] # Layer Normalization t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][ "scale" ] # Encoder-Decoder-Attention t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"] t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"] t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"] t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"] t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"] # Layer Normalization t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"] # MLP if split_mlp_wi: t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"] t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"] t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization tx5_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"] 
flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm if split_mlp_wi: flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0 flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1 else: flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tx5_mlp_layer_norm flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block # Decoder Normalization tx5_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"] flax_model.params["decoder"]["final_layer_norm"]["weight"] = tx5_decoder_norm # Only for layer 0: t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][ "embedding" ] = t5x_decoder_rel_embedding # Token Embeddings tx5_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"] flax_model.params["shared"]["embedding"] = tx5_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in t5x_model["target"]["decoder"]: flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"] flax_model.save_pretrained(flax_dump_folder_path) print("T5X Model was sucessfully converted!") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) args = parser.parse_args() convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert T5/LongT5X checkpoints from the original repository to JAX/FLAX model. This script is an extension of 'src/transformers/models/t5/convert_t5x_checkpoint_to_flax. """ import argparse from t5x import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path): config = AutoConfig.from_pretrained(config_name) flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config) t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"] if config.model_type == "t5": encoder_attn_name = "SelfAttention" if config.model_type == "longt5" and config.encoder_attention_type == "local": encoder_attn_name = "LocalSelfAttention" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": encoder_attn_name = "TransientGlobalSelfAttention" else: raise ValueError( "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`" " attribute with a value from ['local', 'transient-global]." ) # Encoder for layer_index in range(config.num_layers): layer_name = f"layers_{str(layer_index)}" # Self-Attention t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"] t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"] t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"] t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"] # Layer Normalization t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"] if split_mlp_wi: t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"] t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"] t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"] flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value 
flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][ "weight" ] = t5x_global_layer_norm if split_mlp_wi: flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0 flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1 else: flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block # Only for layer 0: t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][ "embedding" ] = t5x_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][ "embedding" ] = t5x_encoder_global_rel_embedding # Assigning t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"] flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm # Decoder for layer_index in range(config.num_layers): layer_name = f"layers_{str(layer_index)}" # Self-Attention t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"] t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"] t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"] t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"] # Layer Normalization t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][ "scale" ] # Encoder-Decoder-Attention t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"] t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"] t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"] t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"] t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"] # Layer Normalization t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"] # MLP if split_mlp_wi: t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"] t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"] t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization tx5_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"] 
flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm if split_mlp_wi: flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0 flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1 else: flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tx5_mlp_layer_norm flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block # Decoder Normalization tx5_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"] flax_model.params["decoder"]["final_layer_norm"]["weight"] = tx5_decoder_norm # Only for layer 0: t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][ "embedding" ] = t5x_decoder_rel_embedding # Token Embeddings tx5_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"] flax_model.params["shared"]["embedding"] = tx5_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in t5x_model["target"]["decoder"]: flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"] flax_model.save_pretrained(flax_dump_folder_path) print("T5X Model was sucessfully converted!") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) args = parser.parse_args() convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/ibert/quant_modules.py
# coding=utf-8 # Copyright 2021 The I-BERT Authors (Sehoon Kim, Amir Gholami, Zhewei Yao, # Michael Mahoney, Kurt Keutzer - UC Berkeley) and The HuggingFace Inc. team. # Copyright (c) 20121, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import decimal import numpy as np import torch from torch import nn from torch.autograd import Function from ...utils import logging logger = logging.get_logger(__name__) class QuantEmbedding(nn.Module): """ Quantized version of `torch.nn.Embedding`. Adds quantization-specific arguments on top of `torch.nn.Embedding`. Args: weight_bit (`int`, *optional*, defaults to `8`): Bitwidth for the quantized weight. momentum (`float`, *optional*, defaults to `0.95`): Momentum for updating the activation quantization range. quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. """ def __init__( self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, weight_bit=8, momentum=0.95, quant_mode=False, ): super().__init__() self.num_ = num_embeddings self.dim = embedding_dim self.padding_idx = padding_idx self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq self.sparse = sparse self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim])) self.register_buffer("weight_scaling_factor", torch.zeros(1)) self.register_buffer("weight_integer", torch.zeros_like(self.weight)) self.weight_bit = weight_bit self.momentum = momentum self.quant_mode = quant_mode self.percentile_mode = False self.weight_function = SymmetricQuantFunction.apply def forward(self, x, positions=None, incremental_state=None): if not self.quant_mode: return ( nn.functional.embedding( x, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ), None, ) w = self.weight w_transform = w.data.detach() w_min = w_transform.min().expand(1) w_max = w_transform.max().expand(1) self.weight_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, False) self.weight_integer = self.weight_function( self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor ) emb_int = nn.functional.embedding( x, self.weight_integer, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) return emb_int * self.weight_scaling_factor, self.weight_scaling_factor class QuantAct(nn.Module): """ Quantizes the given activation. Args: activation_bit (`int`): Bitwidth for the quantized activation. act_range_momentum (`float`, *optional*, defaults to `0.95`): Momentum for updating the activation quantization range. per_channel (`bool`, *optional*, defaults to `False`): Whether to or not use channel-wise quantization. channel_len (`int`, *optional*): Specify the channel length when set the *per_channel* True. quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. 
""" def __init__(self, activation_bit, act_range_momentum=0.95, per_channel=False, channel_len=None, quant_mode=False): super().__init__() self.activation_bit = activation_bit self.act_range_momentum = act_range_momentum self.quant_mode = quant_mode self.per_channel = per_channel self.percentile = False self.act_function = SymmetricQuantFunction.apply if not self.per_channel: self.register_buffer("x_min", torch.zeros(1)) self.register_buffer("x_max", torch.zeros(1)) self.register_buffer("act_scaling_factor", torch.zeros(1)) self.x_min -= 1e-5 self.x_max += 1e-5 else: raise NotImplementedError("per-channel mode is not currently supported for activation.") def __repr__(self): return ( f"{self.__class__.__name__}(activation_bit={self.activation_bit}, " f"quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, " f"Act_max: {self.x_max.item():.2f})" ) def forward( self, x, pre_act_scaling_factor=None, identity=None, identity_scaling_factor=None, specified_min=None, specified_max=None, ): x_act = x if identity is None else identity + x # collect running stats if training if self.training: assert not self.percentile, "percentile mode is not currently supported for activation." assert not self.per_channel, "per-channel mode is not currently supported for activation." x_min = x_act.data.min() x_max = x_act.data.max() assert ( x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0 ), "NaN detected when computing min/max of the activation" # Initialization if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5: self.x_min = self.x_min + x_min self.x_max = self.x_max + x_max # exponential moving average (EMA) # use momentum to prevent the quantized values change greatly every iteration elif self.act_range_momentum == -1: self.x_min = torch.min(self.x_min, x_min) self.x_max = torch.max(self.x_max, x_max) else: self.x_min = self.x_min * self.act_range_momentum + x_min * (1 - self.act_range_momentum) self.x_max = self.x_max * self.act_range_momentum + x_max * (1 - self.act_range_momentum) if not self.quant_mode: return x_act, None x_min = self.x_min if specified_min is None else specified_min x_max = self.x_max if specified_max is None else specified_max self.act_scaling_factor = symmetric_linear_quantization_params( self.activation_bit, x_min, x_max, per_channel=self.per_channel ) if pre_act_scaling_factor is None: # this is for the input quantization quant_act_int = self.act_function(x, self.activation_bit, self.percentile, self.act_scaling_factor) else: quant_act_int = FixedPointMul.apply( x, pre_act_scaling_factor, self.activation_bit, self.act_scaling_factor, identity, identity_scaling_factor, ) correct_output_scale = self.act_scaling_factor.view(-1) return quant_act_int * correct_output_scale, self.act_scaling_factor class QuantLinear(nn.Module): """ Quantized version of `torch.nn.Linear`. Adds quantization-specific arguments on top of `torch.nn.Linear`. Args: weight_bit (`int`, *optional*, defaults to `8`): Bitwidth for the quantized weight. bias_bit (`int`, *optional*, defaults to `32`): Bitwidth for the quantized bias. per_channel (`bool`, *optional*, defaults to `False`): Whether or not to use channel-wise quantization. quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. 
""" def __init__( self, in_features, out_features, bias=True, weight_bit=8, bias_bit=32, per_channel=False, quant_mode=False ): super().__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.zeros([out_features, in_features])) self.register_buffer("weight_integer", torch.zeros_like(self.weight)) self.register_buffer("fc_scaling_factor", torch.zeros(self.out_features)) if bias: self.bias = nn.Parameter(torch.zeros(out_features)) self.register_buffer("bias_integer", torch.zeros_like(self.bias)) self.weight_bit = weight_bit self.quant_mode = quant_mode self.per_channel = per_channel self.bias_bit = bias_bit self.quant_mode = quant_mode self.percentile_mode = False self.weight_function = SymmetricQuantFunction.apply def __repr__(self): s = super().__repr__() s = f"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})" return s def forward(self, x, prev_act_scaling_factor=None): if not self.quant_mode: return nn.functional.linear(x, weight=self.weight, bias=self.bias), None # assert that prev_act_scaling_factor is a scalar tensor assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), ( "Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. " "Please add a QuantAct layer with `per_channel = True` before this QuantAct layer" ) w = self.weight w_transform = w.data.detach() if self.per_channel: w_min, _ = torch.min(w_transform, dim=1, out=None) w_max, _ = torch.max(w_transform, dim=1, out=None) else: w_min = w_transform.min().expand(1) w_max = w_transform.max().expand(1) self.fc_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, self.per_channel) self.weight_integer = self.weight_function( self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor ) bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor if self.bias is not None: self.bias_integer = self.weight_function(self.bias, self.bias_bit, False, bias_scaling_factor) prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1) x_int = x / prev_act_scaling_factor return ( nn.functional.linear(x_int, weight=self.weight_integer, bias=self.bias_integer) * bias_scaling_factor, bias_scaling_factor, ) class IntGELU(nn.Module): """ Quantized version of `torch.nn.GELU`. Adds quantization-specific arguments on top of `torch.nn.GELU`. Args: quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. force_dequant (`str`, *optional*, defaults to `"none"`): Force dequantize the layer if either "gelu" or "nonlinear" is given. 
""" def __init__(self, quant_mode=True, force_dequant="none"): super().__init__() self.quant_mode = quant_mode if force_dequant in ["nonlinear", "gelu"]: logger.info("Force dequantize gelu") self.quant_mode = False if not self.quant_mode: self.activation_fn = nn.GELU() self.k = 1.4142 self.const = 14 # dummy integer constant self.coeff = [-0.2888, -1.769, 1] # a(x+b)**2 + c self.coeff[2] /= self.coeff[0] def int_erf(self, x_int, scaling_factor): b_int = torch.floor(self.coeff[1] / scaling_factor) c_int = torch.floor(self.coeff[2] / scaling_factor**2) sign = torch.sign(x_int) abs_int = torch.min(torch.abs(x_int), -b_int) y_int = sign * ((abs_int + b_int) ** 2 + c_int) scaling_factor = scaling_factor**2 * self.coeff[0] # avoid overflow y_int = floor_ste.apply(y_int / 2**self.const) scaling_factor = scaling_factor * 2**self.const return y_int, scaling_factor def forward(self, x, scaling_factor=None): if not self.quant_mode: return self.activation_fn(x), None x_int = x / scaling_factor sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k) shift_int = 1.0 // sigmoid_scaling_factor x_int = x_int * (sigmoid_int + shift_int) scaling_factor = scaling_factor * sigmoid_scaling_factor / 2 return x_int * scaling_factor, scaling_factor class IntSoftmax(nn.Module): """ Quantized version of `torch.nn.Softmax`. Adds quantization-specific arguments on top of `torch.nn.Softmax`. Args: output_bit (`int`): Bitwidth for the layer output activation. quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. force_dequant (`str`, *optional*, defaults to `"none"`): Force dequantize the layer if either "softmax" or "nonlinear" is given. """ def __init__(self, output_bit, quant_mode=False, force_dequant="none"): super().__init__() self.output_bit = output_bit self.max_bit = 32 self.quant_mode = quant_mode if force_dequant in ["nonlinear", "softmax"]: logger.info("Force dequantize softmax") self.quant_mode = False self.act = QuantAct(16, quant_mode=self.quant_mode) self.x0 = -0.6931 # -ln2 self.const = 30 # dummy integer constant self.coef = [0.35815147, 0.96963238, 1.0] # ax**2 + bx + c self.coef[1] /= self.coef[0] self.coef[2] /= self.coef[0] def int_polynomial(self, x_int, scaling_factor): with torch.no_grad(): b_int = torch.floor(self.coef[1] / scaling_factor) c_int = torch.floor(self.coef[2] / scaling_factor**2) z = (x_int + b_int) * x_int + c_int scaling_factor = self.coef[0] * scaling_factor**2 return z, scaling_factor def int_exp(self, x_int, scaling_factor): with torch.no_grad(): x0_int = torch.floor(self.x0 / scaling_factor) x_int = torch.max(x_int, self.const * x0_int) q = floor_ste.apply(x_int / x0_int) r = x_int - x0_int * q exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor) exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0) scaling_factor = exp_scaling_factor / 2**self.const return exp_int, scaling_factor def forward(self, x, scaling_factor): if not self.quant_mode: return nn.functional.softmax(x, dim=-1), None x_int = x / scaling_factor x_int_max, _ = x_int.max(dim=-1, keepdim=True) x_int = x_int - x_int_max exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor) # Avoid overflow exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor) exp_int = exp / exp_scaling_factor exp_int_sum = exp_int.sum(dim=-1, keepdim=True) factor = floor_ste.apply(2**self.max_bit / exp_int_sum) exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit)) scaling_factor = 1 / 
2**self.output_bit return exp_int * scaling_factor, scaling_factor class IntLayerNorm(nn.Module): """ Quantized version of `torch.nn.LayerNorm`. Adds quantization-specific arguments on top of `torch.nn.LayerNorm`. Args: output_bit (`int`, *optional*, defaults to `8`): Bitwidth for the layer output activation. quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. force_dequant (`str`, *optional*, defaults to `"none"`): Force dequantize the layer if either "layernorm" or "nonlinear" is given. """ def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant="none"): super().__init__() self.normalized_shape = normalized_shape self.eps = eps self.weight = nn.Parameter(torch.zeros(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.quant_mode = quant_mode if force_dequant in ["nonlinear", "layernorm"]: logger.info("Force dequantize layernorm") self.quant_mode = False self.register_buffer("shift", torch.zeros(1)) self.output_bit = output_bit self.max_bit = 32 self.dim_sqrt = None self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode) def set_shift(self, y_int): with torch.no_grad(): y_sq_int = y_int**2 var_int = torch.sum(y_sq_int, axis=2, keepdim=True) shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max() shift_old = self.shift self.shift = torch.max(self.shift, shift) logger.info(f"Dynamic shift adjustment: {int(shift_old)} -> {int(self.shift)}") def overflow_fallback(self, y_int): """ This fallback function is called when overflow is detected during training time, and adjusts the `self.shift` to avoid overflow in the subsequent runs. """ self.set_shift(y_int) # adjusts `self.shift` y_int_shifted = floor_ste.apply(y_int / 2**self.shift) y_sq_int = y_int_shifted**2 var_int = torch.sum(y_sq_int, axis=2, keepdim=True) return var_int def forward(self, x, scaling_factor=None): if not self.quant_mode: mean = x.mean(axis=2, keepdim=True) y = x - mean var = torch.mean(y**2, axis=2, keepdim=True) x = y / torch.sqrt(self.eps + var) x = x * self.weight + self.bias return x, None # compute sqrt of the feature dimension if it is the first run if self.dim_sqrt is None: n = torch.tensor(x.shape[2], dtype=torch.float) self.dim_sqrt = torch.sqrt(n).to(x.device) # Normalization: computes mean and variance(std) x_int = x / scaling_factor mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True)) y_int = x_int - mean_int y_int_shifted = floor_ste.apply(y_int / 2**self.shift) y_sq_int = y_int_shifted**2 var_int = torch.sum(y_sq_int, axis=2, keepdim=True) # overflow handling in training time if self.training: # if overflow is detected if var_int.max() >= 2**self.max_bit: var_int = self.overflow_fallback(y_int) assert var_int.max() < 2**self.max_bit + 0.1, ( "Error detected in overflow handling: " "`var_int` exceeds `self.max_bit` (the maximum possible bit width)" ) # To be replaced with integer-sqrt kernel that produces the same output std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift factor = floor_ste.apply(2**31 / std_int) y_int = floor_ste.apply(y_int * factor / 2) scaling_factor = self.dim_sqrt / 2**30 # scaling and shifting bias = self.bias.data.detach() / (self.weight.data.detach()) bias_int = floor_ste.apply(bias / scaling_factor) y_int = y_int + bias_int scaling_factor = scaling_factor * self.weight x = y_int * scaling_factor return x, scaling_factor def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False): """ Calculate the 
percentile max and min values in a given tensor Args: input (`torch.Tensor`): The target tensor to calculate percentile max and min. lower_percentile (`float`): If 0.1, means we return the value of the smallest 0.1% value in the tensor as percentile min. upper_percentile (`float`): If 99.9, means we return the value of the largest 0.1% value in the tensor as percentile max. output_tensor (`bool`, *optional*, defaults to `False`): If True, this function returns tensors, otherwise it returns values. Returns: `Tuple(torch.Tensor, torch.Tensor)`: Percentile min and max value of *input* """ input_length = input.shape[0] lower_index = round(input_length * (1 - lower_percentile * 0.01)) upper_index = round(input_length * upper_percentile * 0.01) upper_bound = torch.kthvalue(input, k=upper_index).values if lower_percentile == 0: lower_bound = upper_bound * 0 # lower_index += 1 else: lower_bound = -torch.kthvalue(-input, k=lower_index).values if not output_tensor: lower_bound = lower_bound.item() upper_bound = upper_bound.item() return lower_bound, upper_bound def linear_quantize(input, scale, zero_point, inplace=False): """ Quantize single-precision input tensor to integers with the given scaling factor and zeropoint. Args: input (`torch.Tensor`): Single-precision input tensor to be quantized. scale (`torch.Tensor`): Scaling factor for quantization. zero_pint (`torch.Tensor`): Shift for quantization. inplace (`bool`, *optional*, defaults to `False`): Whether to compute inplace or not. Returns: `torch.Tensor`: Linearly quantized value of *input* according to *scale* and *zero_point*. """ # reshape scale and zeropoint for convolutional weights and activation if len(input.shape) == 4: scale = scale.view(-1, 1, 1, 1) zero_point = zero_point.view(-1, 1, 1, 1) # reshape scale and zeropoint for linear weights elif len(input.shape) == 2: scale = scale.view(-1, 1) zero_point = zero_point.view(-1, 1) else: scale = scale.view(-1) zero_point = zero_point.view(-1) # quantized = float / scale + zero_point if inplace: input.mul_(1.0 / scale).add_(zero_point).round_() return input return torch.round(1.0 / scale * input + zero_point) def symmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, per_channel=False): """ Compute the scaling factor with the given quantization range for symmetric quantization. Args: saturation_min (`torch.Tensor`): Lower bound for quantization range. saturation_max (`torch.Tensor`): Upper bound for quantization range. per_channel (`bool`, *optional*, defaults to `False`): Whether to or not use channel-wise quantization. Returns: `torch.Tensor`: Scaling factor that linearly quantizes the given range between *saturation_min* and *saturation_max*. """ # in this part, we do not need any gradient computation, # in order to enforce this, we put torch.no_grad() with torch.no_grad(): n = 2 ** (num_bits - 1) - 1 if per_channel: scale, _ = torch.max(torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1) scale = torch.clamp(scale, min=1e-8) / n else: scale = max(saturation_min.abs(), saturation_max.abs()) scale = torch.clamp(scale, min=1e-8) / n return scale class SymmetricQuantFunction(Function): """ Class to quantize the given floating-point values using symmetric quantization with given range and bitwidth. """ @staticmethod def forward(ctx, x, k, percentile_mode, scale): """ Args: x (`torch.Tensor`): Floating point tensor to be quantized. k (`int`): Quantization bitwidth. percentile_mode (`bool`): Whether or not to use percentile calibration. 
scale (`torch.Tensor`): Pre-calculated scaling factor for *x*. Note that the current implementation of SymmetricQuantFunction requires pre-calculated scaling factor. Returns: `torch.Tensor`: Symmetric-quantized value of *input*. """ zero_point = torch.tensor(0.0).to(scale.device) n = 2 ** (k - 1) - 1 new_quant_x = linear_quantize(x, scale, zero_point, inplace=False) new_quant_x = torch.clamp(new_quant_x, -n, n - 1) ctx.scale = scale return new_quant_x @staticmethod def backward(ctx, grad_output): scale = ctx.scale if len(grad_output.shape) == 4: scale = scale.view(-1, 1, 1, 1) # reshape scale and zeropoint for linear weights elif len(grad_output.shape) == 2: scale = scale.view(-1, 1) else: scale = scale.view(-1) return grad_output.clone() / scale, None, None, None, None class floor_ste(Function): """ Straight-through Estimator(STE) for torch.floor() """ @staticmethod def forward(ctx, x): return torch.floor(x) @staticmethod def backward(ctx, grad_output): return grad_output.clone() class round_ste(Function): """ Straight-through Estimator(STE) for torch.round() """ @staticmethod def forward(ctx, x): return torch.round(x) @staticmethod def backward(ctx, grad_output): return grad_output.clone() def batch_frexp(inputs, max_bit=31): """ Decompose the scaling factor into mantissa and twos exponent. Args: scaling_factor (`torch.Tensor`): Target scaling factor to decompose. Returns: ``Tuple(torch.Tensor, torch.Tensor)`: mantisa and exponent """ shape_of_input = inputs.size() # trans the input to be a 1-d tensor inputs = inputs.view(-1) output_m, output_e = np.frexp(inputs.cpu().numpy()) tmp_m = [] for m in output_m: int_m_shifted = int( decimal.Decimal(m * (2**max_bit)).quantize(decimal.Decimal("1"), rounding=decimal.ROUND_HALF_UP) ) tmp_m.append(int_m_shifted) output_m = np.array(tmp_m) output_e = float(max_bit) - output_e return ( torch.from_numpy(output_m).to(inputs.device).view(shape_of_input), torch.from_numpy(output_e).to(inputs.device).view(shape_of_input), ) class FixedPointMul(Function): """ Function to perform fixed-point arithmetic that can match integer arithmetic on hardware. Args: pre_act (`torch.Tensor`): Input tensor. pre_act_scaling_factor (`torch.Tensor`): Scaling factor of the input tensor *pre_act*. bit_num (`int`): Quantization bitwidth. z_scaling_factor (`torch.Tensor`): Scaling factor of the output tensor. identity (`torch.Tensor`, *optional*): Identity tensor, if exists. identity_scaling_factor (`torch.Tensor`, *optional*): Scaling factor of the identity tensor *identity*, if exists. Returns: `torch.Tensor`: Output tensor(*pre_act* if *identity* is not given, otherwise the addition of *pre_act* and *identity*), whose scale is rescaled to *z_scaling_factor*. 
""" @staticmethod def forward( ctx, pre_act, pre_act_scaling_factor, bit_num, z_scaling_factor, identity=None, identity_scaling_factor=None, ): if len(pre_act_scaling_factor.shape) == 3: reshape = lambda x: x # noqa: E731 else: reshape = lambda x: x.view(1, 1, -1) # noqa: E731 ctx.identity = identity n = 2 ** (bit_num - 1) - 1 with torch.no_grad(): pre_act_scaling_factor = reshape(pre_act_scaling_factor) if identity is not None: identity_scaling_factor = reshape(identity_scaling_factor) ctx.z_scaling_factor = z_scaling_factor z_int = torch.round(pre_act / pre_act_scaling_factor) _A = pre_act_scaling_factor.type(torch.double) _B = (z_scaling_factor.type(torch.float)).type(torch.double) new_scale = _A / _B new_scale = reshape(new_scale) m, e = batch_frexp(new_scale) output = z_int.type(torch.double) * m.type(torch.double) output = torch.round(output / (2.0**e)) if identity is not None: # needs addition of identity activation wx_int = torch.round(identity / identity_scaling_factor) _A = identity_scaling_factor.type(torch.double) _B = (z_scaling_factor.type(torch.float)).type(torch.double) new_scale = _A / _B new_scale = reshape(new_scale) m1, e1 = batch_frexp(new_scale) output1 = wx_int.type(torch.double) * m1.type(torch.double) output1 = torch.round(output1 / (2.0**e1)) output = output1 + output return torch.clamp(output.type(torch.float), -n - 1, n) @staticmethod def backward(ctx, grad_output): identity_grad = None if ctx.identity is not None: identity_grad = grad_output.clone() / ctx.z_scaling_factor return grad_output.clone() / ctx.z_scaling_factor, None, None, None, None, identity_grad, None
# coding=utf-8 # Copyright 2021 The I-BERT Authors (Sehoon Kim, Amir Gholami, Zhewei Yao, # Michael Mahoney, Kurt Keutzer - UC Berkeley) and The HuggingFace Inc. team. # Copyright (c) 20121, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import decimal import numpy as np import torch from torch import nn from torch.autograd import Function from ...utils import logging logger = logging.get_logger(__name__) class QuantEmbedding(nn.Module): """ Quantized version of `torch.nn.Embedding`. Adds quantization-specific arguments on top of `torch.nn.Embedding`. Args: weight_bit (`int`, *optional*, defaults to `8`): Bitwidth for the quantized weight. momentum (`float`, *optional*, defaults to `0.95`): Momentum for updating the activation quantization range. quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. """ def __init__( self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, weight_bit=8, momentum=0.95, quant_mode=False, ): super().__init__() self.num_ = num_embeddings self.dim = embedding_dim self.padding_idx = padding_idx self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq self.sparse = sparse self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim])) self.register_buffer("weight_scaling_factor", torch.zeros(1)) self.register_buffer("weight_integer", torch.zeros_like(self.weight)) self.weight_bit = weight_bit self.momentum = momentum self.quant_mode = quant_mode self.percentile_mode = False self.weight_function = SymmetricQuantFunction.apply def forward(self, x, positions=None, incremental_state=None): if not self.quant_mode: return ( nn.functional.embedding( x, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ), None, ) w = self.weight w_transform = w.data.detach() w_min = w_transform.min().expand(1) w_max = w_transform.max().expand(1) self.weight_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, False) self.weight_integer = self.weight_function( self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor ) emb_int = nn.functional.embedding( x, self.weight_integer, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) return emb_int * self.weight_scaling_factor, self.weight_scaling_factor class QuantAct(nn.Module): """ Quantizes the given activation. Args: activation_bit (`int`): Bitwidth for the quantized activation. act_range_momentum (`float`, *optional*, defaults to `0.95`): Momentum for updating the activation quantization range. per_channel (`bool`, *optional*, defaults to `False`): Whether to or not use channel-wise quantization. channel_len (`int`, *optional*): Specify the channel length when set the *per_channel* True. quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. 
""" def __init__(self, activation_bit, act_range_momentum=0.95, per_channel=False, channel_len=None, quant_mode=False): super().__init__() self.activation_bit = activation_bit self.act_range_momentum = act_range_momentum self.quant_mode = quant_mode self.per_channel = per_channel self.percentile = False self.act_function = SymmetricQuantFunction.apply if not self.per_channel: self.register_buffer("x_min", torch.zeros(1)) self.register_buffer("x_max", torch.zeros(1)) self.register_buffer("act_scaling_factor", torch.zeros(1)) self.x_min -= 1e-5 self.x_max += 1e-5 else: raise NotImplementedError("per-channel mode is not currently supported for activation.") def __repr__(self): return ( f"{self.__class__.__name__}(activation_bit={self.activation_bit}, " f"quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, " f"Act_max: {self.x_max.item():.2f})" ) def forward( self, x, pre_act_scaling_factor=None, identity=None, identity_scaling_factor=None, specified_min=None, specified_max=None, ): x_act = x if identity is None else identity + x # collect running stats if training if self.training: assert not self.percentile, "percentile mode is not currently supported for activation." assert not self.per_channel, "per-channel mode is not currently supported for activation." x_min = x_act.data.min() x_max = x_act.data.max() assert ( x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0 ), "NaN detected when computing min/max of the activation" # Initialization if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5: self.x_min = self.x_min + x_min self.x_max = self.x_max + x_max # exponential moving average (EMA) # use momentum to prevent the quantized values change greatly every iteration elif self.act_range_momentum == -1: self.x_min = torch.min(self.x_min, x_min) self.x_max = torch.max(self.x_max, x_max) else: self.x_min = self.x_min * self.act_range_momentum + x_min * (1 - self.act_range_momentum) self.x_max = self.x_max * self.act_range_momentum + x_max * (1 - self.act_range_momentum) if not self.quant_mode: return x_act, None x_min = self.x_min if specified_min is None else specified_min x_max = self.x_max if specified_max is None else specified_max self.act_scaling_factor = symmetric_linear_quantization_params( self.activation_bit, x_min, x_max, per_channel=self.per_channel ) if pre_act_scaling_factor is None: # this is for the input quantization quant_act_int = self.act_function(x, self.activation_bit, self.percentile, self.act_scaling_factor) else: quant_act_int = FixedPointMul.apply( x, pre_act_scaling_factor, self.activation_bit, self.act_scaling_factor, identity, identity_scaling_factor, ) correct_output_scale = self.act_scaling_factor.view(-1) return quant_act_int * correct_output_scale, self.act_scaling_factor class QuantLinear(nn.Module): """ Quantized version of `torch.nn.Linear`. Adds quantization-specific arguments on top of `torch.nn.Linear`. Args: weight_bit (`int`, *optional*, defaults to `8`): Bitwidth for the quantized weight. bias_bit (`int`, *optional*, defaults to `32`): Bitwidth for the quantized bias. per_channel (`bool`, *optional*, defaults to `False`): Whether or not to use channel-wise quantization. quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. 
""" def __init__( self, in_features, out_features, bias=True, weight_bit=8, bias_bit=32, per_channel=False, quant_mode=False ): super().__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.zeros([out_features, in_features])) self.register_buffer("weight_integer", torch.zeros_like(self.weight)) self.register_buffer("fc_scaling_factor", torch.zeros(self.out_features)) if bias: self.bias = nn.Parameter(torch.zeros(out_features)) self.register_buffer("bias_integer", torch.zeros_like(self.bias)) self.weight_bit = weight_bit self.quant_mode = quant_mode self.per_channel = per_channel self.bias_bit = bias_bit self.quant_mode = quant_mode self.percentile_mode = False self.weight_function = SymmetricQuantFunction.apply def __repr__(self): s = super().__repr__() s = f"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})" return s def forward(self, x, prev_act_scaling_factor=None): if not self.quant_mode: return nn.functional.linear(x, weight=self.weight, bias=self.bias), None # assert that prev_act_scaling_factor is a scalar tensor assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), ( "Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. " "Please add a QuantAct layer with `per_channel = True` before this QuantAct layer" ) w = self.weight w_transform = w.data.detach() if self.per_channel: w_min, _ = torch.min(w_transform, dim=1, out=None) w_max, _ = torch.max(w_transform, dim=1, out=None) else: w_min = w_transform.min().expand(1) w_max = w_transform.max().expand(1) self.fc_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, self.per_channel) self.weight_integer = self.weight_function( self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor ) bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor if self.bias is not None: self.bias_integer = self.weight_function(self.bias, self.bias_bit, False, bias_scaling_factor) prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1) x_int = x / prev_act_scaling_factor return ( nn.functional.linear(x_int, weight=self.weight_integer, bias=self.bias_integer) * bias_scaling_factor, bias_scaling_factor, ) class IntGELU(nn.Module): """ Quantized version of `torch.nn.GELU`. Adds quantization-specific arguments on top of `torch.nn.GELU`. Args: quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. force_dequant (`str`, *optional*, defaults to `"none"`): Force dequantize the layer if either "gelu" or "nonlinear" is given. 
""" def __init__(self, quant_mode=True, force_dequant="none"): super().__init__() self.quant_mode = quant_mode if force_dequant in ["nonlinear", "gelu"]: logger.info("Force dequantize gelu") self.quant_mode = False if not self.quant_mode: self.activation_fn = nn.GELU() self.k = 1.4142 self.const = 14 # dummy integer constant self.coeff = [-0.2888, -1.769, 1] # a(x+b)**2 + c self.coeff[2] /= self.coeff[0] def int_erf(self, x_int, scaling_factor): b_int = torch.floor(self.coeff[1] / scaling_factor) c_int = torch.floor(self.coeff[2] / scaling_factor**2) sign = torch.sign(x_int) abs_int = torch.min(torch.abs(x_int), -b_int) y_int = sign * ((abs_int + b_int) ** 2 + c_int) scaling_factor = scaling_factor**2 * self.coeff[0] # avoid overflow y_int = floor_ste.apply(y_int / 2**self.const) scaling_factor = scaling_factor * 2**self.const return y_int, scaling_factor def forward(self, x, scaling_factor=None): if not self.quant_mode: return self.activation_fn(x), None x_int = x / scaling_factor sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k) shift_int = 1.0 // sigmoid_scaling_factor x_int = x_int * (sigmoid_int + shift_int) scaling_factor = scaling_factor * sigmoid_scaling_factor / 2 return x_int * scaling_factor, scaling_factor class IntSoftmax(nn.Module): """ Quantized version of `torch.nn.Softmax`. Adds quantization-specific arguments on top of `torch.nn.Softmax`. Args: output_bit (`int`): Bitwidth for the layer output activation. quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. force_dequant (`str`, *optional*, defaults to `"none"`): Force dequantize the layer if either "softmax" or "nonlinear" is given. """ def __init__(self, output_bit, quant_mode=False, force_dequant="none"): super().__init__() self.output_bit = output_bit self.max_bit = 32 self.quant_mode = quant_mode if force_dequant in ["nonlinear", "softmax"]: logger.info("Force dequantize softmax") self.quant_mode = False self.act = QuantAct(16, quant_mode=self.quant_mode) self.x0 = -0.6931 # -ln2 self.const = 30 # dummy integer constant self.coef = [0.35815147, 0.96963238, 1.0] # ax**2 + bx + c self.coef[1] /= self.coef[0] self.coef[2] /= self.coef[0] def int_polynomial(self, x_int, scaling_factor): with torch.no_grad(): b_int = torch.floor(self.coef[1] / scaling_factor) c_int = torch.floor(self.coef[2] / scaling_factor**2) z = (x_int + b_int) * x_int + c_int scaling_factor = self.coef[0] * scaling_factor**2 return z, scaling_factor def int_exp(self, x_int, scaling_factor): with torch.no_grad(): x0_int = torch.floor(self.x0 / scaling_factor) x_int = torch.max(x_int, self.const * x0_int) q = floor_ste.apply(x_int / x0_int) r = x_int - x0_int * q exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor) exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0) scaling_factor = exp_scaling_factor / 2**self.const return exp_int, scaling_factor def forward(self, x, scaling_factor): if not self.quant_mode: return nn.functional.softmax(x, dim=-1), None x_int = x / scaling_factor x_int_max, _ = x_int.max(dim=-1, keepdim=True) x_int = x_int - x_int_max exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor) # Avoid overflow exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor) exp_int = exp / exp_scaling_factor exp_int_sum = exp_int.sum(dim=-1, keepdim=True) factor = floor_ste.apply(2**self.max_bit / exp_int_sum) exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit)) scaling_factor = 1 / 
2**self.output_bit return exp_int * scaling_factor, scaling_factor class IntLayerNorm(nn.Module): """ Quantized version of `torch.nn.LayerNorm`. Adds quantization-specific arguments on top of `torch.nn.LayerNorm`. Args: output_bit (`int`, *optional*, defaults to `8`): Bitwidth for the layer output activation. quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. force_dequant (`str`, *optional*, defaults to `"none"`): Force dequantize the layer if either "layernorm" or "nonlinear" is given. """ def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant="none"): super().__init__() self.normalized_shape = normalized_shape self.eps = eps self.weight = nn.Parameter(torch.zeros(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.quant_mode = quant_mode if force_dequant in ["nonlinear", "layernorm"]: logger.info("Force dequantize layernorm") self.quant_mode = False self.register_buffer("shift", torch.zeros(1)) self.output_bit = output_bit self.max_bit = 32 self.dim_sqrt = None self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode) def set_shift(self, y_int): with torch.no_grad(): y_sq_int = y_int**2 var_int = torch.sum(y_sq_int, axis=2, keepdim=True) shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max() shift_old = self.shift self.shift = torch.max(self.shift, shift) logger.info(f"Dynamic shift adjustment: {int(shift_old)} -> {int(self.shift)}") def overflow_fallback(self, y_int): """ This fallback function is called when overflow is detected during training time, and adjusts the `self.shift` to avoid overflow in the subsequent runs. """ self.set_shift(y_int) # adjusts `self.shift` y_int_shifted = floor_ste.apply(y_int / 2**self.shift) y_sq_int = y_int_shifted**2 var_int = torch.sum(y_sq_int, axis=2, keepdim=True) return var_int def forward(self, x, scaling_factor=None): if not self.quant_mode: mean = x.mean(axis=2, keepdim=True) y = x - mean var = torch.mean(y**2, axis=2, keepdim=True) x = y / torch.sqrt(self.eps + var) x = x * self.weight + self.bias return x, None # compute sqrt of the feature dimension if it is the first run if self.dim_sqrt is None: n = torch.tensor(x.shape[2], dtype=torch.float) self.dim_sqrt = torch.sqrt(n).to(x.device) # Normalization: computes mean and variance(std) x_int = x / scaling_factor mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True)) y_int = x_int - mean_int y_int_shifted = floor_ste.apply(y_int / 2**self.shift) y_sq_int = y_int_shifted**2 var_int = torch.sum(y_sq_int, axis=2, keepdim=True) # overflow handling in training time if self.training: # if overflow is detected if var_int.max() >= 2**self.max_bit: var_int = self.overflow_fallback(y_int) assert var_int.max() < 2**self.max_bit + 0.1, ( "Error detected in overflow handling: " "`var_int` exceeds `self.max_bit` (the maximum possible bit width)" ) # To be replaced with integer-sqrt kernel that produces the same output std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift factor = floor_ste.apply(2**31 / std_int) y_int = floor_ste.apply(y_int * factor / 2) scaling_factor = self.dim_sqrt / 2**30 # scaling and shifting bias = self.bias.data.detach() / (self.weight.data.detach()) bias_int = floor_ste.apply(bias / scaling_factor) y_int = y_int + bias_int scaling_factor = scaling_factor * self.weight x = y_int * scaling_factor return x, scaling_factor def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False): """ Calculate the 
percentile max and min values in a given tensor Args: input (`torch.Tensor`): The target tensor to calculate percentile max and min. lower_percentile (`float`): If 0.1, means we return the value of the smallest 0.1% value in the tensor as percentile min. upper_percentile (`float`): If 99.9, means we return the value of the largest 0.1% value in the tensor as percentile max. output_tensor (`bool`, *optional*, defaults to `False`): If True, this function returns tensors, otherwise it returns values. Returns: `Tuple(torch.Tensor, torch.Tensor)`: Percentile min and max value of *input* """ input_length = input.shape[0] lower_index = round(input_length * (1 - lower_percentile * 0.01)) upper_index = round(input_length * upper_percentile * 0.01) upper_bound = torch.kthvalue(input, k=upper_index).values if lower_percentile == 0: lower_bound = upper_bound * 0 # lower_index += 1 else: lower_bound = -torch.kthvalue(-input, k=lower_index).values if not output_tensor: lower_bound = lower_bound.item() upper_bound = upper_bound.item() return lower_bound, upper_bound def linear_quantize(input, scale, zero_point, inplace=False): """ Quantize single-precision input tensor to integers with the given scaling factor and zeropoint. Args: input (`torch.Tensor`): Single-precision input tensor to be quantized. scale (`torch.Tensor`): Scaling factor for quantization. zero_point (`torch.Tensor`): Shift for quantization. inplace (`bool`, *optional*, defaults to `False`): Whether to compute inplace or not. Returns: `torch.Tensor`: Linearly quantized value of *input* according to *scale* and *zero_point*. """ # reshape scale and zeropoint for convolutional weights and activation if len(input.shape) == 4: scale = scale.view(-1, 1, 1, 1) zero_point = zero_point.view(-1, 1, 1, 1) # reshape scale and zeropoint for linear weights elif len(input.shape) == 2: scale = scale.view(-1, 1) zero_point = zero_point.view(-1, 1) else: scale = scale.view(-1) zero_point = zero_point.view(-1) # quantized = float / scale + zero_point if inplace: input.mul_(1.0 / scale).add_(zero_point).round_() return input return torch.round(1.0 / scale * input + zero_point) def symmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, per_channel=False): """ Compute the scaling factor with the given quantization range for symmetric quantization. Args: saturation_min (`torch.Tensor`): Lower bound for quantization range. saturation_max (`torch.Tensor`): Upper bound for quantization range. per_channel (`bool`, *optional*, defaults to `False`): Whether or not to use channel-wise quantization. Returns: `torch.Tensor`: Scaling factor that linearly quantizes the given range between *saturation_min* and *saturation_max*. """ # in this part, we do not need any gradient computation, # in order to enforce this, we put torch.no_grad() with torch.no_grad(): n = 2 ** (num_bits - 1) - 1 if per_channel: scale, _ = torch.max(torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1) scale = torch.clamp(scale, min=1e-8) / n else: scale = max(saturation_min.abs(), saturation_max.abs()) scale = torch.clamp(scale, min=1e-8) / n return scale class SymmetricQuantFunction(Function): """ Class to quantize the given floating-point values using symmetric quantization with given range and bitwidth. """ @staticmethod def forward(ctx, x, k, percentile_mode, scale): """ Args: x (`torch.Tensor`): Floating point tensor to be quantized. k (`int`): Quantization bitwidth. percentile_mode (`bool`): Whether or not to use percentile calibration. 
scale (`torch.Tensor`): Pre-calculated scaling factor for *x*. Note that the current implementation of SymmetricQuantFunction requires pre-calculated scaling factor. Returns: `torch.Tensor`: Symmetric-quantized value of *input*. """ zero_point = torch.tensor(0.0).to(scale.device) n = 2 ** (k - 1) - 1 new_quant_x = linear_quantize(x, scale, zero_point, inplace=False) new_quant_x = torch.clamp(new_quant_x, -n, n - 1) ctx.scale = scale return new_quant_x @staticmethod def backward(ctx, grad_output): scale = ctx.scale if len(grad_output.shape) == 4: scale = scale.view(-1, 1, 1, 1) # reshape scale and zeropoint for linear weights elif len(grad_output.shape) == 2: scale = scale.view(-1, 1) else: scale = scale.view(-1) return grad_output.clone() / scale, None, None, None, None class floor_ste(Function): """ Straight-through Estimator (STE) for torch.floor() """ @staticmethod def forward(ctx, x): return torch.floor(x) @staticmethod def backward(ctx, grad_output): return grad_output.clone() class round_ste(Function): """ Straight-through Estimator (STE) for torch.round() """ @staticmethod def forward(ctx, x): return torch.round(x) @staticmethod def backward(ctx, grad_output): return grad_output.clone() def batch_frexp(inputs, max_bit=31): """ Decompose the scaling factor into mantissa and twos exponent. Args: inputs (`torch.Tensor`): Target scaling factor to decompose. Returns: `Tuple(torch.Tensor, torch.Tensor)`: mantissa and exponent """ shape_of_input = inputs.size() # trans the input to be a 1-d tensor inputs = inputs.view(-1) output_m, output_e = np.frexp(inputs.cpu().numpy()) tmp_m = [] for m in output_m: int_m_shifted = int( decimal.Decimal(m * (2**max_bit)).quantize(decimal.Decimal("1"), rounding=decimal.ROUND_HALF_UP) ) tmp_m.append(int_m_shifted) output_m = np.array(tmp_m) output_e = float(max_bit) - output_e return ( torch.from_numpy(output_m).to(inputs.device).view(shape_of_input), torch.from_numpy(output_e).to(inputs.device).view(shape_of_input), ) class FixedPointMul(Function): """ Function to perform fixed-point arithmetic that can match integer arithmetic on hardware. Args: pre_act (`torch.Tensor`): Input tensor. pre_act_scaling_factor (`torch.Tensor`): Scaling factor of the input tensor *pre_act*. bit_num (`int`): Quantization bitwidth. z_scaling_factor (`torch.Tensor`): Scaling factor of the output tensor. identity (`torch.Tensor`, *optional*): Identity tensor, if exists. identity_scaling_factor (`torch.Tensor`, *optional*): Scaling factor of the identity tensor *identity*, if exists. Returns: `torch.Tensor`: Output tensor (*pre_act* if *identity* is not given, otherwise the addition of *pre_act* and *identity*), whose scale is rescaled to *z_scaling_factor*. 
""" @staticmethod def forward( ctx, pre_act, pre_act_scaling_factor, bit_num, z_scaling_factor, identity=None, identity_scaling_factor=None, ): if len(pre_act_scaling_factor.shape) == 3: reshape = lambda x: x # noqa: E731 else: reshape = lambda x: x.view(1, 1, -1) # noqa: E731 ctx.identity = identity n = 2 ** (bit_num - 1) - 1 with torch.no_grad(): pre_act_scaling_factor = reshape(pre_act_scaling_factor) if identity is not None: identity_scaling_factor = reshape(identity_scaling_factor) ctx.z_scaling_factor = z_scaling_factor z_int = torch.round(pre_act / pre_act_scaling_factor) _A = pre_act_scaling_factor.type(torch.double) _B = (z_scaling_factor.type(torch.float)).type(torch.double) new_scale = _A / _B new_scale = reshape(new_scale) m, e = batch_frexp(new_scale) output = z_int.type(torch.double) * m.type(torch.double) output = torch.round(output / (2.0**e)) if identity is not None: # needs addition of identity activation wx_int = torch.round(identity / identity_scaling_factor) _A = identity_scaling_factor.type(torch.double) _B = (z_scaling_factor.type(torch.float)).type(torch.double) new_scale = _A / _B new_scale = reshape(new_scale) m1, e1 = batch_frexp(new_scale) output1 = wx_int.type(torch.double) * m1.type(torch.double) output1 = torch.round(output1 / (2.0**e1)) output = output1 + output return torch.clamp(output.type(torch.float), -n - 1, n) @staticmethod def backward(ctx, grad_output): identity_grad = None if ctx.identity is not None: identity_grad = grad_output.clone() / ctx.z_scaling_factor return grad_output.clone() / ctx.z_scaling_factor, None, None, None, None, identity_grad, None
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/models/mctct/test_feature_extraction_mctct.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import MCTCTFeatureExtractor global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for _batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_torchaudio class MCTCTFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.num_mel_bins = num_mel_bins self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class MCTCTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = MCTCTFeatureExtractor if is_speech_available() else None def setUp(self): self.feat_extract_tester = MCTCTFeatureExtractionTester(self) def _check_zero_mean_unit_variance(self, input_vector): self.assertTrue(np.all(np.mean(input_vector) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(input_vector) - 1) < 1e-3)) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 12000 speech_inputs = [floats_list((1, x))[0] for x in 
range(8000, 14000, 2000)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_cepstral_mean_and_variance_normalization(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True, truncation=max_length is not None, # reference to #16419 ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean_and_variance_normalization_np(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True, truncation=max_length is not None, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean_and_variance_normalization_trunc_max_length(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)] inputs = feature_extractor( speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) 
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1]) self._check_zero_mean_unit_variance(input_features[2]) def test_cepstral_mean_and_variance_normalization_trunc_longest(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)] inputs = feature_extractor( speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 4, 24)) speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)] inputs = feature_extractor( speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 16, 24)) def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def test_different_window(self): import torch init_dict = self.feat_extract_tester.prepare_feat_extract_dict() init_dict["win_function"] = "hann_window" feature_extractor = self.feature_extraction_class(**init_dict) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32)
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import MCTCTFeatureExtractor global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for _batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_torchaudio class MCTCTFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.num_mel_bins = num_mel_bins self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class MCTCTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = MCTCTFeatureExtractor if is_speech_available() else None def setUp(self): self.feat_extract_tester = MCTCTFeatureExtractionTester(self) def _check_zero_mean_unit_variance(self, input_vector): self.assertTrue(np.all(np.mean(input_vector) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(input_vector) - 1) < 1e-3)) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 12000 speech_inputs = [floats_list((1, x))[0] for x in 
range(8000, 14000, 2000)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_cepstral_mean_and_variance_normalization(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True, truncation=max_length is not None, # reference to #16419 ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean_and_variance_normalization_np(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True, truncation=max_length is not None, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean_and_variance_normalization_trunc_max_length(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)] inputs = feature_extractor( speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) 
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1]) self._check_zero_mean_unit_variance(input_features[2]) def test_cepstral_mean_and_variance_normalization_trunc_longest(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)] inputs = feature_extractor( speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 4, 24)) speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)] inputs = feature_extractor( speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 16, 24)) def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def test_different_window(self): import torch init_dict = self.feat_extract_tester.prepare_feat_extract_dict() init_dict["win_function"] = "hann_window" feature_extractor = self.feature_extraction_class(**init_dict) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32)
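For readers following the tests above, this is roughly how they drive the feature extractor end to end. A hedged sketch only: it assumes `transformers` is installed with its speech dependencies (torchaudio) and reuses the constructor arguments the tester class above prepares; random arrays stand in for real audio.

```python
import numpy as np
from transformers import MCTCTFeatureExtractor

# same settings as MCTCTFeatureExtractionTester.prepare_feat_extract_dict()
feature_extractor = MCTCTFeatureExtractor(
    feature_size=24,
    num_mel_bins=24,
    padding_value=0.0,
    sampling_rate=16_000,
    return_attention_mask=True,
    do_normalize=True,
)

# three mono waveforms of increasing length, mimicking floats_list((1, x))[0] in the tests
speech_inputs = [np.random.rand(length).astype(np.float64) for length in (8000, 10000, 12000)]

inputs = feature_extractor(speech_inputs, padding=True, return_tensors="np", return_attention_mask=True)
print(inputs.input_features.shape)   # (batch, frames, feature_size); the tests assert the last dim equals 24
print(inputs.attention_mask.shape)   # (batch, frames) mask separating real frames from padding
```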
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./docs/source/en/model_doc/deberta.mdx
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DeBERTa ## Overview The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It is based on Google's BERT model released in 2018 and Facebook's RoBERTa model released in 2019. It builds on RoBERTa with disentangled attention and enhanced mask decoder training with half of the data used in RoBERTa. The abstract from the paper is the following: *Recent progress in pre-trained neural language models has significantly improved the performance of many natural language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the disentangled attention mechanism, where each word is represented using two vectors that encode its content and position, respectively, and the attention weights among words are computed using disentangled matrices on their contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to predict the masked tokens for model pretraining. We show that these two techniques significantly improve the efficiency of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9% (90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and pre-trained models will be made publicly available at https://github.com/microsoft/DeBERTa.* This model was contributed by [DeBERTa](https://huggingface.co/DeBERTa). The TF 2.0 implementation of this model was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/DeBERTa). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DeBERTa. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="text-classification"/> - A blog post on how to [Accelerate Large Model Training using DeepSpeed](https://huggingface.co/blog/accelerate-deepspeed) with DeBERTa. - A blog post on [Supercharged Customer Service with Machine Learning](https://huggingface.co/blog/supercharge-customer-service-with-machine-learning) with DeBERTa. - [`DebertaForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb). 
- [`TFDebertaForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb). <PipelineTag pipeline="token-classification" /> - [`DebertaForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb). - [`TFDebertaForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb). - [Token classification](https://huggingface.co/course/chapter7/2?fw=pt) chapter of the 🤗 Hugging Face Course. - [Byte-Pair Encoding tokenization](https://huggingface.co/course/chapter6/5?fw=pt) chapter of the 🤗 Hugging Face Course. <PipelineTag pipeline="fill-mask"/> - [`DebertaForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). - [`TFDebertaForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). - [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the 🤗 Hugging Face Course. <PipelineTag pipeline="question-answering"/> - [`DebertaForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb). - [`TFDebertaForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb). - [Question answering](https://huggingface.co/course/chapter7/7?fw=pt) chapter of the 🤗 Hugging Face Course. 
## DebertaConfig [[autodoc]] DebertaConfig ## DebertaTokenizer [[autodoc]] DebertaTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## DebertaTokenizerFast [[autodoc]] DebertaTokenizerFast - build_inputs_with_special_tokens - create_token_type_ids_from_sequences ## DebertaModel [[autodoc]] DebertaModel - forward ## DebertaPreTrainedModel [[autodoc]] DebertaPreTrainedModel ## DebertaForMaskedLM [[autodoc]] DebertaForMaskedLM - forward ## DebertaForSequenceClassification [[autodoc]] DebertaForSequenceClassification - forward ## DebertaForTokenClassification [[autodoc]] DebertaForTokenClassification - forward ## DebertaForQuestionAnswering [[autodoc]] DebertaForQuestionAnswering - forward ## TFDebertaModel [[autodoc]] TFDebertaModel - call ## TFDebertaPreTrainedModel [[autodoc]] TFDebertaPreTrainedModel - call ## TFDebertaForMaskedLM [[autodoc]] TFDebertaForMaskedLM - call ## TFDebertaForSequenceClassification [[autodoc]] TFDebertaForSequenceClassification - call ## TFDebertaForTokenClassification [[autodoc]] TFDebertaForTokenClassification - call ## TFDebertaForQuestionAnswering [[autodoc]] TFDebertaForQuestionAnswering - call
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DeBERTa ## Overview The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It is based on Google's BERT model released in 2018 and Facebook's RoBERTa model released in 2019. It builds on RoBERTa with disentangled attention and enhanced mask decoder training with half of the data used in RoBERTa. The abstract from the paper is the following: *Recent progress in pre-trained neural language models has significantly improved the performance of many natural language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the disentangled attention mechanism, where each word is represented using two vectors that encode its content and position, respectively, and the attention weights among words are computed using disentangled matrices on their contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to predict the masked tokens for model pretraining. We show that these two techniques significantly improve the efficiency of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9% (90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and pre-trained models will be made publicly available at https://github.com/microsoft/DeBERTa.* This model was contributed by [DeBERTa](https://huggingface.co/DeBERTa). The TF 2.0 implementation of this model was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/DeBERTa). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DeBERTa. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="text-classification"/> - A blog post on how to [Accelerate Large Model Training using DeepSpeed](https://huggingface.co/blog/accelerate-deepspeed) with DeBERTa. - A blog post on [Supercharged Customer Service with Machine Learning](https://huggingface.co/blog/supercharge-customer-service-with-machine-learning) with DeBERTa. - [`DebertaForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb). 
- [`TFDebertaForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb). <PipelineTag pipeline="token-classification" /> - [`DebertaForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb). - [`TFDebertaForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb). - [Token classification](https://huggingface.co/course/chapter7/2?fw=pt) chapter of the 🤗 Hugging Face Course. - [Byte-Pair Encoding tokenization](https://huggingface.co/course/chapter6/5?fw=pt) chapter of the 🤗 Hugging Face Course. <PipelineTag pipeline="fill-mask"/> - [`DebertaForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). - [`TFDebertaForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). - [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the 🤗 Hugging Face Course. <PipelineTag pipeline="question-answering"/> - [`DebertaForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb). - [`TFDebertaForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb). - [Question answering](https://huggingface.co/course/chapter7/7?fw=pt) chapter of the 🤗 Hugging Face Course. 
## DebertaConfig [[autodoc]] DebertaConfig ## DebertaTokenizer [[autodoc]] DebertaTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## DebertaTokenizerFast [[autodoc]] DebertaTokenizerFast - build_inputs_with_special_tokens - create_token_type_ids_from_sequences ## DebertaModel [[autodoc]] DebertaModel - forward ## DebertaPreTrainedModel [[autodoc]] DebertaPreTrainedModel ## DebertaForMaskedLM [[autodoc]] DebertaForMaskedLM - forward ## DebertaForSequenceClassification [[autodoc]] DebertaForSequenceClassification - forward ## DebertaForTokenClassification [[autodoc]] DebertaForTokenClassification - forward ## DebertaForQuestionAnswering [[autodoc]] DebertaForQuestionAnswering - forward ## TFDebertaModel [[autodoc]] TFDebertaModel - call ## TFDebertaPreTrainedModel [[autodoc]] TFDebertaPreTrainedModel - call ## TFDebertaForMaskedLM [[autodoc]] TFDebertaForMaskedLM - call ## TFDebertaForSequenceClassification [[autodoc]] TFDebertaForSequenceClassification - call ## TFDebertaForTokenClassification [[autodoc]] TFDebertaForTokenClassification - call ## TFDebertaForQuestionAnswering [[autodoc]] TFDebertaForQuestionAnswering - call
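As a quick orientation to the classes listed above, here is a hedged, minimal sketch of loading the base encoder with `DebertaModel` and `DebertaTokenizer`. It assumes network access to the Hub; the `microsoft/deberta-base` checkpoint name is an assumption (a commonly used base checkpoint, not one pinned by this page).

```python
import torch
from transformers import DebertaModel, DebertaTokenizer

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaModel.from_pretrained("microsoft/deberta-base")

inputs = tokenizer("DeBERTa improves BERT with disentangled attention.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# one contextual vector per input token
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
```

The task-specific classes in the list above (`DebertaForSequenceClassification`, `DebertaForQuestionAnswering`, and so on) wrap this same encoder with a small head on top.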
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/albert/modeling_tf_albert.py
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 ALBERT model.""" import math from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_albert import AlbertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "albert-base-v2" _CONFIG_FOR_DOC = "AlbertConfig" _TOKENIZER_FOR_DOC = "AlbertTokenizer" TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "albert-base-v1", "albert-large-v1", "albert-xlarge-v1", "albert-xxlarge-v1", "albert-base-v2", "albert-large-v2", "albert-xlarge-v2", "albert-xxlarge-v2", # See all ALBERT models at https://huggingface.co/models?filter=albert ] class TFAlbertPreTrainingLoss: """ Loss function suitable for ALBERT pretraining, that is, the task of pretraining a language model by combining SOP + MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. 
""" def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) if self.config.tf_legacy_loss: # make sure only labels that are not equal to -100 # are taken into account as loss masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels["labels"], shape=(-1,)), -100) masked_lm_reduced_logits = tf.boolean_mask( tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])), mask=masked_lm_active_loss, ) masked_lm_labels = tf.boolean_mask( tensor=tf.reshape(tensor=labels["labels"], shape=(-1,)), mask=masked_lm_active_loss ) sentence_order_active_loss = tf.not_equal( tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), -100 ) sentence_order_reduced_logits = tf.boolean_mask( tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=sentence_order_active_loss ) sentence_order_label = tf.boolean_mask( tensor=tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), mask=sentence_order_active_loss ) masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits) sentence_order_loss = loss_fn(y_true=sentence_order_label, y_pred=sentence_order_reduced_logits) masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(sentence_order_loss)[0])) masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0) return masked_lm_loss + sentence_order_loss # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0]) # make sure only labels that are not equal to -100 # are taken into account for the loss computation lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype) masked_lm_losses = unmasked_lm_losses * lm_loss_mask reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask) sop_logits = tf.reshape(logits[1], (-1, 2)) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_sop_loss = loss_fn(y_true=tf.nn.relu(labels["sentence_order_label"]), y_pred=sop_logits) sop_loss_mask = tf.cast(labels["sentence_order_label"] != -100, dtype=unmasked_sop_loss.dtype) masked_sop_loss = unmasked_sop_loss * sop_loss_mask reduced_masked_sop_loss = tf.reduce_sum(masked_sop_loss) / tf.reduce_sum(sop_loss_mask) return tf.reshape(reduced_masked_lm_loss + reduced_masked_sop_loss, (1,)) class TFAlbertEmbeddings(tf.keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.type_vocab_size = config.type_vocab_size self.embedding_size = config.embedding_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape: tf.TensorShape): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.type_vocab_size, self.embedding_size], 
initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.embedding_size], initializer=get_initializer(self.initializer_range), ) super().build(input_shape) # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, past_key_values_length=0, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ if input_ids is None and inputs_embeds is None: raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound # indices on GPU, returning zeros instead. This is a dangerous silent behavior. tf.debugging.assert_less( input_ids, tf.cast(self.vocab_size, dtype=input_ids.dtype), message=( "input_ids must be smaller than the embedding layer's input dimension (got" f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" ), ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims( tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0 ) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFAlbertAttention(tf.keras.layers.Layer): """Contains the complete attention sublayer, including both dropouts and layer norm.""" def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.output_attentions = config.output_attentions self.query = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") # Two different dropout probabilities; see https://github.com/google-research/albert/blob/master/modeling.py#L971-L993 self.attention_dropout = 
tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.output_dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(input_tensor)[0] mixed_query_layer = self.query(inputs=input_tensor) mixed_key_layer = self.key(inputs=input_tensor) mixed_value_layer = self.value(inputs=input_tensor) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFAlbertModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
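        # Concretely, `attention_probs` has shape (batch_size, num_attention_heads, seq_len, seq_len);
        # dropping an attention weight here removes that key position's contribution for the
        # corresponding query and head before the weighted sum over the value vectors below.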
attention_probs = self.attention_dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) context_layer = tf.reshape(tensor=context_layer, shape=(batch_size, -1, self.all_head_size)) self_outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) hidden_states = self_outputs[0] hidden_states = self.dense(inputs=hidden_states) hidden_states = self.output_dropout(inputs=hidden_states, training=training) attention_output = self.LayerNorm(inputs=hidden_states + input_tensor) # add attentions if we output them outputs = (attention_output,) + self_outputs[1:] return outputs class TFAlbertLayer(tf.keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.attention = TFAlbertAttention(config, name="attention") self.ffn = tf.keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn" ) if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.ffn_output = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn_output" ) self.full_layer_layer_norm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="full_layer_layer_norm" ) self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, training=training, ) ffn_output = self.ffn(inputs=attention_outputs[0]) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(inputs=ffn_output) ffn_output = self.dropout(inputs=ffn_output, training=training) hidden_states = self.full_layer_layer_norm(inputs=ffn_output + attention_outputs[0]) # add attentions if we output them outputs = (hidden_states,) + attention_outputs[1:] return outputs class TFAlbertLayerGroup(tf.keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.albert_layers = [ TFAlbertLayer(config, name=f"albert_layers_._{i}") for i in range(config.inner_group_num) ] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: layer_hidden_states = () if output_hidden_states else None layer_attentions = () if output_attentions else None for layer_index, albert_layer in enumerate(self.albert_layers): if output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) layer_output = albert_layer( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[layer_index], output_attentions=output_attentions, training=training, ) hidden_states = layer_output[0] if output_attentions: layer_attentions = layer_attentions + (layer_output[1],) # Add last layer if output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) return tuple(v for v in 
[hidden_states, layer_hidden_states, layer_attentions] if v is not None) class TFAlbertTransformer(tf.keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.num_hidden_layers = config.num_hidden_layers self.num_hidden_groups = config.num_hidden_groups # Number of layers in a hidden group self.layers_per_group = int(config.num_hidden_layers / config.num_hidden_groups) self.embedding_hidden_mapping_in = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="embedding_hidden_mapping_in", ) self.albert_layer_groups = [ TFAlbertLayerGroup(config, name=f"albert_layer_groups_._{i}") for i in range(config.num_hidden_groups) ] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: hidden_states = self.embedding_hidden_mapping_in(inputs=hidden_states) all_attentions = () if output_attentions else None all_hidden_states = (hidden_states,) if output_hidden_states else None for i in range(self.num_hidden_layers): # Index of the hidden group group_idx = int(i / (self.num_hidden_layers / self.num_hidden_groups)) layer_group_output = self.albert_layer_groups[group_idx]( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[group_idx * self.layers_per_group : (group_idx + 1) * self.layers_per_group], output_attentions=output_attentions, output_hidden_states=output_hidden_states, training=training, ) hidden_states = layer_group_output[0] if output_attentions: all_attentions = all_attentions + layer_group_output[-1] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class TFAlbertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = AlbertConfig base_model_prefix = "albert" class TFAlbertMLMHead(tf.keras.layers.Layer): def __init__(self, config: AlbertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.embedding_size = config.embedding_size self.dense = tf.keras.layers.Dense( config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
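        # `input_embeddings` here is the TFAlbertEmbeddings layer; its `weight` matrix of shape
        # (vocab_size, embedding_size) is reused in `call` below through a transposed matmul,
        # so the MLM head allocates no separate decoder projection matrix.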
self.decoder = input_embeddings def build(self, input_shape: tf.TensorShape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") self.decoder_bias = self.add_weight( shape=(self.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias" ) super().build(input_shape) def get_output_embeddings(self) -> tf.keras.layers.Layer: return self.decoder def set_output_embeddings(self, value: tf.Variable): self.decoder.weight = value self.decoder.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias, "decoder_bias": self.decoder_bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.decoder_bias = value["decoder_bias"] self.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size]) hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.decoder_bias) return hidden_states @keras_serializable class TFAlbertMainLayer(tf.keras.layers.Layer): config_class = AlbertConfig def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFAlbertEmbeddings(config, name="embeddings") self.encoder = TFAlbertTransformer(config, name="encoder") self.pooler = ( tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="pooler", ) if add_pooling_layer else None ) def get_input_embeddings(self) -> tf.keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel
        """
        raise NotImplementedError

    @unpack_inputs
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if attention_mask is None:
            attention_mask = tf.fill(dims=input_shape, value=1)

        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            training=training,
        )

        # We create a broadcastable attention mask from the 2D padding mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # This attention mask is simpler than the triangular masking of causal attention
        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
        extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
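        # For example, a padding mask of [1, 1, 0] becomes [0.0, 0.0, -10000.0] after the
        # (1.0 - mask) * -10000.0 transform below, so padded positions receive a large negative
        # additive bias and end up with near-zero weight after the softmax.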
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(inputs=sequence_output[:, 0]) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @dataclass class TFAlbertForPreTrainingOutput(ModelOutput): """ Output type of [`TFAlbertForPreTraining`]. Args: prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). sop_logits (`tf.Tensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor = None prediction_logits: tf.Tensor = None sop_logits: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None ALBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. 
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`AlbertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ ALBERT_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AlbertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) class TFAlbertModel(TFAlbertPreTrainedModel): def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, name="albert") @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutputWithPooling( last_hidden_state=output.last_hidden_state, pooler_output=output.pooler_output, hidden_states=hs, attentions=attns, ) @add_start_docstrings( """ Albert Model with two heads on top for pretraining: a `masked language modeling` head and a `sentence order prediction` (classification) head. """, ALBERT_START_DOCSTRING, ) class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions.decoder.weight"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name="albert") self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions") self.sop_classifier = TFAlbertSOPHead(config, name="sop_classifier") def get_lm_head(self) -> tf.keras.layers.Layer: return self.predictions @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, sentence_order_label: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFAlbertForPreTrainingOutput, Tuple[tf.Tensor]]: r""" Return: Example: ```python >>> import tensorflow as tf >>> from transformers import AlbertTokenizer, TFAlbertForPreTraining >>> tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2") >>> model = TFAlbertForPreTraining.from_pretrained("albert-base-v2") >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] >>> # Batch size 1 >>> outputs = model(input_ids) >>> prediction_logits = outputs.prediction_logits >>> sop_logits = outputs.sop_logits ```""" outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output, pooled_output = outputs[:2] prediction_scores = self.predictions(hidden_states=sequence_output) sop_scores = self.sop_classifier(pooled_output=pooled_output, training=training) total_loss = None if labels is not None and sentence_order_label is not None: d_labels = {"labels": labels} d_labels["sentence_order_label"] = sentence_order_label total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, sop_scores)) if not return_dict: output = (prediction_scores, sop_scores) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return TFAlbertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, sop_logits=sop_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output: TFAlbertForPreTrainingOutput) -> TFAlbertForPreTrainingOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFAlbertForPreTrainingOutput( prediction_logits=output.prediction_logits, sop_logits=output.sop_logits, hidden_states=hs, 
attentions=attns, ) class TFAlbertSOPHead(tf.keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.dropout = tf.keras.layers.Dropout(rate=config.classifier_dropout_prob) self.classifier = tf.keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) def call(self, pooled_output: tf.Tensor, training: bool) -> tf.Tensor: dropout_pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=dropout_pooled_output) return logits @add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING) class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions.decoder.weight"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert") self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions") def get_lm_head(self) -> tf.keras.layers.Layer: return self.predictions @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import AlbertTokenizer, TFAlbertForMaskedLM >>> tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2") >>> model = TFAlbertForMaskedLM.from_pretrained("albert-base-v2") >>> # add mask_token >>> inputs = tokenizer(f"The capital of [MASK] is Paris.", return_tensors="tf") >>> logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = tf.where(inputs.input_ids == tokenizer.mask_token_id)[0][1] >>> predicted_token_id = tf.math.argmax(logits[0, mask_token_index], axis=-1) >>> tokenizer.decode(predicted_token_id) 'france' ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(float(outputs.loss), 2) 0.81 ``` """ outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.predictions(hidden_states=sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns) @add_start_docstrings( """ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name="albert") self.dropout = tf.keras.layers.Dropout(rate=config.classifier_dropout_prob) self.classifier = tf.keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint="vumichien/albert-base-v2-imdb", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'LABEL_1'", expected_loss=0.12, ) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns) @add_start_docstrings( """ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert") classifier_dropout_prob = ( config.classifier_dropout_prob if config.classifier_dropout_prob is not None else config.hidden_dropout_prob ) self.dropout = tf.keras.layers.Dropout(rate=classifier_dropout_prob) self.classifier = tf.keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint="vumichien/tiny-albert", output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=( "['LABEL_1', 'LABEL_1', 'LABEL_1', 'LABEL_0', 'LABEL_1', 'LABEL_0', 'LABEL_1', 'LABEL_1', " "'LABEL_0', 'LABEL_1', 'LABEL_0', 'LABEL_0', 'LABEL_1', 'LABEL_1']" ), expected_loss=0.66, ) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
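        Example (an illustrative sketch, assuming the same `vumichien/tiny-albert` checkpoint referenced in the
        code-sample decorator above):

        ```python
        >>> import tensorflow as tf
        >>> from transformers import AlbertTokenizer, TFAlbertForTokenClassification

        >>> tokenizer = AlbertTokenizer.from_pretrained("vumichien/tiny-albert")
        >>> model = TFAlbertForTokenClassification.from_pretrained("vumichien/tiny-albert")

        >>> inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
        >>> logits = model(**inputs).logits
        >>> predicted_ids = tf.math.argmax(logits, axis=-1)  # one predicted label id per token
        ```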
""" outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(inputs=sequence_output, training=training) logits = self.classifier(inputs=sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns) @add_start_docstrings( """ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, ALBERT_START_DOCSTRING, ) class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert") self.qa_outputs = tf.keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint="vumichien/albert-base-v2-squad2", output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=12, qa_target_end_index=13, expected_output="'a nice puppet'", expected_loss=7.36, ) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). 
Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(inputs=sequence_output) start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) start_logits = tf.squeeze(input=start_logits, axis=-1) end_logits = tf.squeeze(input=end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFQuestionAnsweringModelOutput( start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns ) @add_start_docstrings( """ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, name="albert") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense( units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @property def dummy_inputs(self): """ Dummy inputs to build the network. 
Returns: tf.Tensor with dummy inputs """ return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)} @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = ( tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None ) flat_token_type_ids = ( tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None ) flat_position_ids = ( tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None ) flat_inputs_embeds = ( tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) outputs = self.albert( input_ids=flat_input_ids, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"), } ] ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving def serving(self, inputs: Dict[str, tf.Tensor]) -> TFMultipleChoiceModelOutput: output = self.call(input_ids=inputs) return 
self.serving_output(output)

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
    def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
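# A minimal usage sketch (illustrative only, not part of the module above) showing how the
# pretraining head and the -100 label convention of TFAlbertPreTrainingLoss fit together.
# It assumes the public "albert-base-v2" checkpoint and the AlbertTokenizer /
# TFAlbertForPreTraining classes exported by `transformers`.
import tensorflow as tf

from transformers import AlbertTokenizer, TFAlbertForPreTraining

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = TFAlbertForPreTraining.from_pretrained("albert-base-v2")

inputs = tokenizer("The quick brown fox jumps over the lazy dog", return_tensors="tf")
input_ids = inputs["input_ids"]

# MLM labels: positions set to -100 are ignored by the loss. Here the [CLS] and [SEP] special
# tokens are excluded from the loss and every other position keeps its own token id, purely
# for illustration.
ignore = tf.fill(tf.shape(input_ids[:, :1]), -100)
labels = tf.concat([ignore, input_ids[:, 1:-1], ignore], axis=-1)

# Sentence-order label for the SOP head: 0 = segments in the original order.
sentence_order_label = tf.constant([0])

outputs = model(**inputs, labels=labels, sentence_order_label=sentence_order_label)
print(outputs.loss.shape, outputs.prediction_logits.shape, outputs.sop_logits.shape)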
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 ALBERT model.""" import math from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_albert import AlbertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "albert-base-v2" _CONFIG_FOR_DOC = "AlbertConfig" _TOKENIZER_FOR_DOC = "AlbertTokenizer" TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "albert-base-v1", "albert-large-v1", "albert-xlarge-v1", "albert-xxlarge-v1", "albert-base-v2", "albert-large-v2", "albert-xlarge-v2", "albert-xxlarge-v2", # See all ALBERT models at https://huggingface.co/models?filter=albert ] class TFAlbertPreTrainingLoss: """ Loss function suitable for ALBERT pretraining, that is, the task of pretraining a language model by combining SOP + MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. 
""" def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) if self.config.tf_legacy_loss: # make sure only labels that are not equal to -100 # are taken into account as loss masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels["labels"], shape=(-1,)), -100) masked_lm_reduced_logits = tf.boolean_mask( tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])), mask=masked_lm_active_loss, ) masked_lm_labels = tf.boolean_mask( tensor=tf.reshape(tensor=labels["labels"], shape=(-1,)), mask=masked_lm_active_loss ) sentence_order_active_loss = tf.not_equal( tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), -100 ) sentence_order_reduced_logits = tf.boolean_mask( tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=sentence_order_active_loss ) sentence_order_label = tf.boolean_mask( tensor=tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), mask=sentence_order_active_loss ) masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits) sentence_order_loss = loss_fn(y_true=sentence_order_label, y_pred=sentence_order_reduced_logits) masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(sentence_order_loss)[0])) masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0) return masked_lm_loss + sentence_order_loss # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0]) # make sure only labels that are not equal to -100 # are taken into account for the loss computation lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype) masked_lm_losses = unmasked_lm_losses * lm_loss_mask reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask) sop_logits = tf.reshape(logits[1], (-1, 2)) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_sop_loss = loss_fn(y_true=tf.nn.relu(labels["sentence_order_label"]), y_pred=sop_logits) sop_loss_mask = tf.cast(labels["sentence_order_label"] != -100, dtype=unmasked_sop_loss.dtype) masked_sop_loss = unmasked_sop_loss * sop_loss_mask reduced_masked_sop_loss = tf.reduce_sum(masked_sop_loss) / tf.reduce_sum(sop_loss_mask) return tf.reshape(reduced_masked_lm_loss + reduced_masked_sop_loss, (1,)) class TFAlbertEmbeddings(tf.keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.type_vocab_size = config.type_vocab_size self.embedding_size = config.embedding_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape: tf.TensorShape): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.type_vocab_size, self.embedding_size], 
initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.embedding_size], initializer=get_initializer(self.initializer_range), ) super().build(input_shape) # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, past_key_values_length=0, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ if input_ids is None and inputs_embeds is None: raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound # indices on GPU, returning zeros instead. This is a dangerous silent behavior. tf.debugging.assert_less( input_ids, tf.cast(self.vocab_size, dtype=input_ids.dtype), message=( "input_ids must be smaller than the embedding layer's input dimension (got" f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" ), ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims( tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0 ) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFAlbertAttention(tf.keras.layers.Layer): """Contains the complete attention sublayer, including both dropouts and layer norm.""" def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.output_attentions = config.output_attentions self.query = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") # Two different dropout probabilities; see https://github.com/google-research/albert/blob/master/modeling.py#L971-L993 self.attention_dropout = 
tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.output_dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(input_tensor)[0] mixed_query_layer = self.query(inputs=input_tensor) mixed_key_layer = self.key(inputs=input_tensor) mixed_value_layer = self.value(inputs=input_tensor) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFAlbertModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.attention_dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) context_layer = tf.reshape(tensor=context_layer, shape=(batch_size, -1, self.all_head_size)) self_outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) hidden_states = self_outputs[0] hidden_states = self.dense(inputs=hidden_states) hidden_states = self.output_dropout(inputs=hidden_states, training=training) attention_output = self.LayerNorm(inputs=hidden_states + input_tensor) # add attentions if we output them outputs = (attention_output,) + self_outputs[1:] return outputs class TFAlbertLayer(tf.keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.attention = TFAlbertAttention(config, name="attention") self.ffn = tf.keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn" ) if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.ffn_output = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn_output" ) self.full_layer_layer_norm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="full_layer_layer_norm" ) self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, training=training, ) ffn_output = self.ffn(inputs=attention_outputs[0]) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(inputs=ffn_output) ffn_output = self.dropout(inputs=ffn_output, training=training) hidden_states = self.full_layer_layer_norm(inputs=ffn_output + attention_outputs[0]) # add attentions if we output them outputs = (hidden_states,) + attention_outputs[1:] return outputs class TFAlbertLayerGroup(tf.keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.albert_layers = [ TFAlbertLayer(config, name=f"albert_layers_._{i}") for i in range(config.inner_group_num) ] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: layer_hidden_states = () if output_hidden_states else None layer_attentions = () if output_attentions else None for layer_index, albert_layer in enumerate(self.albert_layers): if output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) layer_output = albert_layer( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[layer_index], output_attentions=output_attentions, training=training, ) hidden_states = layer_output[0] if output_attentions: layer_attentions = layer_attentions + (layer_output[1],) # Add last layer if output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) return tuple(v for v in 
[hidden_states, layer_hidden_states, layer_attentions] if v is not None) class TFAlbertTransformer(tf.keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.num_hidden_layers = config.num_hidden_layers self.num_hidden_groups = config.num_hidden_groups # Number of layers in a hidden group self.layers_per_group = int(config.num_hidden_layers / config.num_hidden_groups) self.embedding_hidden_mapping_in = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="embedding_hidden_mapping_in", ) self.albert_layer_groups = [ TFAlbertLayerGroup(config, name=f"albert_layer_groups_._{i}") for i in range(config.num_hidden_groups) ] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: hidden_states = self.embedding_hidden_mapping_in(inputs=hidden_states) all_attentions = () if output_attentions else None all_hidden_states = (hidden_states,) if output_hidden_states else None for i in range(self.num_hidden_layers): # Index of the hidden group group_idx = int(i / (self.num_hidden_layers / self.num_hidden_groups)) layer_group_output = self.albert_layer_groups[group_idx]( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[group_idx * self.layers_per_group : (group_idx + 1) * self.layers_per_group], output_attentions=output_attentions, output_hidden_states=output_hidden_states, training=training, ) hidden_states = layer_group_output[0] if output_attentions: all_attentions = all_attentions + layer_group_output[-1] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class TFAlbertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = AlbertConfig base_model_prefix = "albert" class TFAlbertMLMHead(tf.keras.layers.Layer): def __init__(self, config: AlbertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.embedding_size = config.embedding_size self.dense = tf.keras.layers.Dense( config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = input_embeddings def build(self, input_shape: tf.TensorShape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") self.decoder_bias = self.add_weight( shape=(self.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias" ) super().build(input_shape) def get_output_embeddings(self) -> tf.keras.layers.Layer: return self.decoder def set_output_embeddings(self, value: tf.Variable): self.decoder.weight = value self.decoder.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias, "decoder_bias": self.decoder_bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.decoder_bias = value["decoder_bias"] self.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size]) hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.decoder_bias) return hidden_states @keras_serializable class TFAlbertMainLayer(tf.keras.layers.Layer): config_class = AlbertConfig def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFAlbertEmbeddings(config, name="embeddings") self.encoder = TFAlbertTransformer(config, name="encoder") self.pooler = ( tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="pooler", ) if add_pooling_layer else None ) def get_input_embeddings(self) -> tf.keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(inputs=sequence_output[:, 0]) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @dataclass class TFAlbertForPreTrainingOutput(ModelOutput): """ Output type of [`TFAlbertForPreTraining`]. Args: prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). sop_logits (`tf.Tensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor = None prediction_logits: tf.Tensor = None sop_logits: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None ALBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. 
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`AlbertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ ALBERT_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AlbertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) class TFAlbertModel(TFAlbertPreTrainedModel): def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, name="albert") @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutputWithPooling( last_hidden_state=output.last_hidden_state, pooler_output=output.pooler_output, hidden_states=hs, attentions=attns, ) @add_start_docstrings( """ Albert Model with two heads on top for pretraining: a `masked language modeling` head and a `sentence order prediction` (classification) head. """, ALBERT_START_DOCSTRING, ) class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions.decoder.weight"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name="albert") self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions") self.sop_classifier = TFAlbertSOPHead(config, name="sop_classifier") def get_lm_head(self) -> tf.keras.layers.Layer: return self.predictions @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, sentence_order_label: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFAlbertForPreTrainingOutput, Tuple[tf.Tensor]]: r""" Return: Example: ```python >>> import tensorflow as tf >>> from transformers import AlbertTokenizer, TFAlbertForPreTraining >>> tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2") >>> model = TFAlbertForPreTraining.from_pretrained("albert-base-v2") >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] >>> # Batch size 1 >>> outputs = model(input_ids) >>> prediction_logits = outputs.prediction_logits >>> sop_logits = outputs.sop_logits ```""" outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output, pooled_output = outputs[:2] prediction_scores = self.predictions(hidden_states=sequence_output) sop_scores = self.sop_classifier(pooled_output=pooled_output, training=training) total_loss = None if labels is not None and sentence_order_label is not None: d_labels = {"labels": labels} d_labels["sentence_order_label"] = sentence_order_label total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, sop_scores)) if not return_dict: output = (prediction_scores, sop_scores) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return TFAlbertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, sop_logits=sop_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output: TFAlbertForPreTrainingOutput) -> TFAlbertForPreTrainingOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFAlbertForPreTrainingOutput( prediction_logits=output.prediction_logits, sop_logits=output.sop_logits, hidden_states=hs, 
attentions=attns, ) class TFAlbertSOPHead(tf.keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.dropout = tf.keras.layers.Dropout(rate=config.classifier_dropout_prob) self.classifier = tf.keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) def call(self, pooled_output: tf.Tensor, training: bool) -> tf.Tensor: dropout_pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=dropout_pooled_output) return logits @add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING) class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions.decoder.weight"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert") self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions") def get_lm_head(self) -> tf.keras.layers.Layer: return self.predictions @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import AlbertTokenizer, TFAlbertForMaskedLM >>> tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2") >>> model = TFAlbertForMaskedLM.from_pretrained("albert-base-v2") >>> # add mask_token >>> inputs = tokenizer(f"The capital of [MASK] is Paris.", return_tensors="tf") >>> logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = tf.where(inputs.input_ids == tokenizer.mask_token_id)[0][1] >>> predicted_token_id = tf.math.argmax(logits[0, mask_token_index], axis=-1) >>> tokenizer.decode(predicted_token_id) 'france' ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(float(outputs.loss), 2) 0.81 ``` """ outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.predictions(hidden_states=sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns) @add_start_docstrings( """ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name="albert") self.dropout = tf.keras.layers.Dropout(rate=config.classifier_dropout_prob) self.classifier = tf.keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint="vumichien/albert-base-v2-imdb", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'LABEL_1'", expected_loss=0.12, ) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns) @add_start_docstrings( """ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert") classifier_dropout_prob = ( config.classifier_dropout_prob if config.classifier_dropout_prob is not None else config.hidden_dropout_prob ) self.dropout = tf.keras.layers.Dropout(rate=classifier_dropout_prob) self.classifier = tf.keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint="vumichien/tiny-albert", output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=( "['LABEL_1', 'LABEL_1', 'LABEL_1', 'LABEL_0', 'LABEL_1', 'LABEL_0', 'LABEL_1', 'LABEL_1', " "'LABEL_0', 'LABEL_1', 'LABEL_0', 'LABEL_0', 'LABEL_1', 'LABEL_1']" ), expected_loss=0.66, ) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(inputs=sequence_output, training=training) logits = self.classifier(inputs=sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns) @add_start_docstrings( """ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, ALBERT_START_DOCSTRING, ) class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert") self.qa_outputs = tf.keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint="vumichien/albert-base-v2-squad2", output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=12, qa_target_end_index=13, expected_output="'a nice puppet'", expected_loss=7.36, ) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). 
Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(inputs=sequence_output) start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) start_logits = tf.squeeze(input=start_logits, axis=-1) end_logits = tf.squeeze(input=end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFQuestionAnsweringModelOutput( start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns ) @add_start_docstrings( """ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, name="albert") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense( units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @property def dummy_inputs(self): """ Dummy inputs to build the network. 
Returns: tf.Tensor with dummy inputs """ return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)} @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = ( tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None ) flat_token_type_ids = ( tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None ) flat_position_ids = ( tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None ) flat_inputs_embeds = ( tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) outputs = self.albert( input_ids=flat_input_ids, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"), } ] ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving def serving(self, inputs: Dict[str, tf.Tensor]) -> TFMultipleChoiceModelOutput: output = self.call(input_ids=inputs) return 
self.serving_output(output) # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
-1
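A minimal usage sketch for the TF 2.0 ALBERT classes reproduced in the row above, mirroring the masked-LM docstring example contained in that file. It assumes `tensorflow` and `transformers` are installed and that the public "albert-base-v2" checkpoint is reachable; it is illustrative only and not part of any dataset row.

import tensorflow as tf
from transformers import AlbertTokenizer, TFAlbertForMaskedLM

# Load the tokenizer and the TF masked-LM head defined in the file above.
tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = TFAlbertForMaskedLM.from_pretrained("albert-base-v2")

# Encode a sentence containing the mask token and run a forward pass.
inputs = tokenizer("The capital of [MASK] is Paris.", return_tensors="tf")
logits = model(**inputs).logits

# Recover the prediction for the masked position, as in the file's docstring example.
mask_token_index = tf.where(inputs.input_ids == tokenizer.mask_token_id)[0][1]
predicted_token_id = tf.math.argmax(logits[0, mask_token_index], axis=-1)
print(tokenizer.decode(predicted_token_id))  # the docstring example expects 'france'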
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/models/marian/__init__.py
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./.git/objects/3d/ece8e4239ae951a70edb6c3fa48d30878d0da5
-1
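The row above references a file under `.git/objects/`, i.e. a git loose object rather than a source file; loose objects are zlib-compressed binary, so their bytes do not render as readable text. A minimal sketch of how such an object could be inspected, assuming local access to the checkout (the path is taken from the row and is used purely for illustration):

import zlib

# Loose objects live at .git/objects/<first 2 hash chars>/<remaining 38 chars>.
path = ".git/objects/3d/ece8e4239ae951a70edb6c3fa48d30878d0da5"
with open(path, "rb") as f:
    raw = zlib.decompress(f.read())

# After decompression the object starts with a header such as b"blob 1234\x00",
# followed by the payload bytes.
header, _, payload = raw.partition(b"\x00")
object_type, size = header.decode().split()
print(object_type, size, len(payload))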
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py
# coding=utf-8 # Copyright 2020 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import tempfile import unittest import numpy as np from transformers import is_tf_available, is_torch_available from transformers.testing_utils import is_pt_tf_cross_test, require_tf, require_torch, slow, torch_device from transformers.utils.generic import ModelOutput from ...test_modeling_tf_common import ids_tensor from ..bert.test_modeling_tf_bert import TFBertModelTester from ..gpt2.test_modeling_tf_gpt2 import TFGPT2ModelTester from ..rembert.test_modeling_tf_rembert import TFRemBertModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester if is_tf_available(): import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EncoderDecoderConfig, TFAutoModel, TFAutoModelForCausalLM, TFBertLMHeadModel, TFBertModel, TFEncoderDecoderModel, TFGPT2LMHeadModel, TFRemBertForCausalLM, TFRemBertModel, TFRobertaForCausalLM, TFRobertaModel, ) from transformers.modeling_tf_outputs import TFBaseModelOutput if is_torch_available(): import torch from transformers import BertLMHeadModel, BertModel, EncoderDecoderModel @require_tf class TFEncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): raise NotImplementedError def prepare_config_and_inputs(self): raise NotImplementedError def get_pretrained_model(self): raise NotImplementedError def check_encoder_decoder_model_from_pretrained_configs( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs ): encoder_decoder_config = EncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = TFEncoderDecoderModel(encoder_decoder_config) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual( outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,)) ) def check_encoder_decoder_model( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) 
self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual( outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,)) ) encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_hidden_states) outputs_encoder_decoder = enc_dec_model( input_ids=None, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual( outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,)) ) def check_encoder_decoder_model_from_pretrained( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, return_dict=True, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual( outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,)) ) def check_save_and_load( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) enc_dec_model = TFEncoderDecoderModel.from_pretrained(tmpdirname) after_outputs = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_encoder_decoder_model_labels( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, labels, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, labels=labels, kwargs=kwargs, ) # Make sure `loss` exist self.assertIn("loss", outputs_encoder_decoder) batch_size, seq_len = decoder_input_ids.shape expected_shape = (batch_size, seq_len, decoder_config.vocab_size) self.assertEqual(outputs_encoder_decoder["logits"].shape, 
expected_shape) self.assertEqual( outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,)) ) def _check_output_with_attentions( self, outputs_encoder_decoder, config, input_ids, decoder_config, decoder_input_ids ): encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) self.assertEqual( encoder_attentions[0].shape[-3:], (config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] * ( 1 + (decoder_config.ngram if hasattr(decoder_config, "ngram") else 0) ) self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, input_ids.shape[-1]), ) def check_encoder_decoder_model_output_attentions( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_attentions=True, kwargs=kwargs, ) self._check_output_with_attentions( outputs_encoder_decoder, config, input_ids, decoder_config, decoder_input_ids ) def check_encoder_decoder_model_output_attentions_from_config( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs ): # Similar to `check_encoder_decoder_model_output_attentions`, but with `output_attentions` triggered from the # config file. Contrarily to most models, changing the model's config won't work -- the defaults are loaded # from the inner models' configurations. 
decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.config.output_attentions = True # model config -> won't work outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertTrue( all( key not in outputs_encoder_decoder for key in ["encoder_attentions", "decoder_attentions", "cross_attentions"] ) ) config.output_attentions = True # inner model config -> will work decoder_config.output_attentions = True encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self._check_output_with_attentions( outputs_encoder_decoder, config, input_ids, decoder_config, decoder_input_ids ) def check_encoder_decoder_model_generate(self, input_ids, config, decoder_config, **kwargs): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) # Generate until max length if hasattr(enc_dec_model.config, "eos_token_id"): enc_dec_model.config.eos_token_id = None if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"): enc_dec_model.config.decoder.eos_token_id = None # Bert does not have a bos token id, so use pad_token_id instead generated_output = enc_dec_model.generate( input_ids, decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id ) self.assertEqual(tuple(generated_output.shape.as_list()), (input_ids.shape[0],) + (decoder_config.max_length,)) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way. Args: model_class: The class of the model that is currently being tested. For example, `TFBertModel`, `TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative error messages. name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc. attributes (`Tuple[str]`): The names of the output's elements if the output is a tuple/list with each element being a named field in the output. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). 
if isinstance(tf_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should be an instance of `ModelOutput` when `tf_outputs` is", ) tf_keys = [k for k, v in tf_outputs.items() if v is not None] pt_keys = [k for k, v in pt_outputs.items() if v is not None] self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `names` attributes = tuple([f"{name}.{k}" for k in tf_keys]) self.check_pt_tf_outputs( tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(tf_outputs) in [tuple, list]: self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch") self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch") if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(tf_outputs), f"{name}: The tuple `names` should have the same length as `tf_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `names` attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))]) for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes): self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(tf_outputs, tf.Tensor): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `tf_outputs` is" ) tf_outputs = tf_outputs.numpy() pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(tf_outputs): tf_outputs = np.array([tf_outputs]) pt_outputs = np.array([pt_outputs]) tf_nans = np.isnan(tf_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[tf_nans] = 0 tf_outputs[tf_nans] = 0 pt_outputs[pt_nans] = 0 tf_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).") else: raise ValueError( "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got" f" {type(tf_outputs)} instead." 
) def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): if type(key) == bool: pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "pixel_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "input_features": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) # other general float inputs elif tf_inputs_dict[name].dtype.is_floating: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) else: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long) return pt_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict): pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict) # send pytorch inputs to the correct device pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } # send pytorch model to the correct device pt_model.to(torch_device) # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) # tf models returned loss is usually a tensor rather than a scalar. # (see `hf_compute_loss`: it uses `tf.keras.losses.Reduction.NONE`) # Change it here to a scalar to match PyTorch models' loss tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model)) def check_pt_tf_equivalence(self, tf_model, pt_model, tf_inputs_dict): """Wrap `check_pt_tf_models` to further check PT -> TF again""" self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # PT -> TF with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) tf_model = TFEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) def check_pt_to_tf_equivalence(self, config, decoder_config, tf_inputs_dict): """EncoderDecoderModel requires special way to cross load (PT -> TF)""" encoder_decoder_config = EncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) # Output all for aggressive testing encoder_decoder_config.output_hidden_states = True # All models tested in this file have attentions encoder_decoder_config.output_attentions = True pt_model = EncoderDecoderModel(encoder_decoder_config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) tf_model = TFEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) def check_tf_to_pt_equivalence(self, config, decoder_config, tf_inputs_dict): """EncoderDecoderModel requires special way to cross load (TF -> PT)""" encoder_decoder_config = EncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) # Output all for aggressive testing encoder_decoder_config.output_hidden_states = True # TODO: A generalizable way to determine this attribute encoder_decoder_config.output_attentions = True tf_model = TFEncoderDecoderModel(encoder_decoder_config) # Make sure model is built before saving tf_model(**tf_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: tf_model.save_pretrained(tmpdirname) pt_model = EncoderDecoderModel.from_pretrained(tmpdirname, 
from_tf=True) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) def test_encoder_decoder_model(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_from_pretrained_configs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True) def test_save_and_load_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load(**input_ids_dict) def test_encoder_decoder_model_labels(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_labels(**input_ids_dict) def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) def test_encoder_decoder_model_output_attentions_from_config(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions_from_config(**input_ids_dict) def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and tf is {diff} (>= {tol}).") @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() labels = config_inputs_dict.pop("decoder_token_labels") # Keep only common arguments arg_names = [ "config", "input_ids", "attention_mask", "decoder_config", "decoder_input_ids", "decoder_attention_mask", "encoder_hidden_states", ] config_inputs_dict = {k: v for k, v in config_inputs_dict.items() if k in arg_names} config = config_inputs_dict.pop("config") decoder_config = config_inputs_dict.pop("decoder_config") # Output all for aggressive testing config.output_hidden_states = True decoder_config.output_hidden_states = True # All models tested in this file have attentions config.output_attentions = True decoder_config.output_attentions = True tf_inputs_dict = config_inputs_dict # `encoder_hidden_states` is not used in model call/forward del tf_inputs_dict["encoder_hidden_states"] # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. for k in ["attention_mask", "decoder_attention_mask"]: attention_mask = tf_inputs_dict[k] # Make sure no all 0s attention masks - to avoid failure at this moment. # Put `1` at the beginning of sequences to make it still work when combining causal attention masks. # TODO: remove this line once a fix regarding large negative values for attention mask is done. 
attention_mask = tf.concat( [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1 ) tf_inputs_dict[k] = attention_mask tf_inputs_dict_with_labels = copy.copy(tf_inputs_dict) tf_inputs_dict_with_labels["labels"] = labels self.assertTrue(decoder_config.cross_attention_hidden_size is None) # Original test: check without `labels` and without `enc_to_dec_proj` projection self.assertTrue(config.hidden_size == decoder_config.hidden_size) self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict) # check with `labels` self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict_with_labels) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict_with_labels) # check `enc_to_dec_proj` work as expected decoder_config.hidden_size = decoder_config.hidden_size * 2 self.assertTrue(config.hidden_size != decoder_config.hidden_size) self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict) def test_model_save_load_from_pretrained(self): model_2 = self.get_pretrained_model() input_ids = ids_tensor([13, 5], model_2.config.encoder.vocab_size) decoder_input_ids = ids_tensor([13, 1], model_2.config.decoder.vocab_size) attention_mask = ids_tensor([13, 5], vocab_size=2) outputs = model_2( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = TFEncoderDecoderModel.from_pretrained(tmp_dirname) after_outputs = model_1( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_tf class TFBertEncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase): def setUp(self): self.encoder_model_tester = TFBertModelTester(self, batch_size=13) self.decoder_model_tester = TFBertModelTester(self, batch_size=13) def get_pretrained_model(self): return TFEncoderDecoderModel.from_encoder_decoder_pretrained( "hf-internal-testing/tiny-random-bert", "hf-internal-testing/tiny-random-bert", ) def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFBertModel(config, name="encoder") decoder_model = TFBertLMHeadModel(decoder_config, name="decoder") return encoder_model, decoder_model def prepare_config_and_inputs(self): encoder_config_and_inputs = self.encoder_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = self.decoder_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_ids, token_type_ids, attention_mask, sequence_labels, token_labels, choice_labels, ) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_attention_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "input_ids": input_ids, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, 
"decoder_attention_mask": decoder_attention_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "encoder_hidden_states": encoder_hidden_states, "labels": decoder_token_labels, } @slow @is_pt_tf_cross_test def test_bert2bert_summarization(self): from transformers import EncoderDecoderModel tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") """Not working, because pt checkpoint has `encoder.encoder.layer...` while tf model has `encoder.bert.encoder.layer...`. (For Bert decoder, there is no issue, because `BertModel` is wrapped into `decoder` as `bert`) model = TFEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16", from_pt=True) """ # workaround to load from pt _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") _model.encoder.save_pretrained("./encoder") _model.decoder.save_pretrained("./decoder") model = TFEncoderDecoderModel.from_encoder_decoder_pretrained( "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True ) model.config = _model.config ARTICLE_STUDENTS = """(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. "As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world," Cohen said. 
Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.""" EXPECTED_SUMMARY_STUDENTS = """sae was founded in 1856, five years before the civil war. the fraternity has had to work hard to change recently. the university of oklahoma president says the university's affiliation with the fraternity is permanently done. the sae has had a string of members in recent months.""" input_dict = tokenizer(ARTICLE_STUDENTS, return_tensors="tf") output_ids = model.generate(input_ids=input_dict["input_ids"], max_length=None).numpy().tolist() summary = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS]) # Test with the TF checkpoint model = TFEncoderDecoderModel.from_pretrained("ydshieh/bert2bert-cnn_dailymail-fp16") output_ids = model.generate(input_ids=input_dict["input_ids"], max_length=None).numpy().tolist() summary = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS]) @require_tf class TFGPT2EncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase): def setUp(self): self.encoder_model_tester = TFBertModelTester(self, batch_size=13) self.decoder_model_tester = TFGPT2ModelTester(self) def get_pretrained_model(self): return TFEncoderDecoderModel.from_encoder_decoder_pretrained( "hf-internal-testing/tiny-random-bert", "hf-internal-testing/tiny-random-gpt2", ) def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFBertModel(config, name="encoder") decoder_model = TFGPT2LMHeadModel(decoder_config, name="decoder") return encoder_model, decoder_model def prepare_config_and_inputs(self): encoder_config_and_inputs = self.encoder_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = self.decoder_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_ids, token_type_ids, attention_mask, sequence_labels, token_labels, choice_labels, ) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, decoder_head_mask, decoder_token_type_ids, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "input_ids": input_ids, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_attention_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "encoder_hidden_states": encoder_hidden_states, "labels": decoder_token_labels, } @slow @is_pt_tf_cross_test def test_bert2gpt2_summarization(self): from transformers import EncoderDecoderModel tokenizer_in = AutoTokenizer.from_pretrained("bert-base-cased") tokenizer_out = AutoTokenizer.from_pretrained("gpt2") """Not working, because pt checkpoint has `encoder.encoder.layer...` while tf model has `encoder.bert.encoder.layer...`. 
(For GPT2 decoder, there is no issue) model = TFEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16", from_pt=True) """ # workaround to load from pt _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16") _model.encoder.save_pretrained("./encoder") _model.decoder.save_pretrained("./decoder") model = TFEncoderDecoderModel.from_encoder_decoder_pretrained( "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True ) model.config = _model.config ARTICLE_STUDENTS = """(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. "As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world," Cohen said. 
Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.""" EXPECTED_SUMMARY_STUDENTS = """SAS Alpha Epsilon suspended the students, but university president says it's permanent.\nThe fraternity has had to deal with a string of student deaths since 2010.\nSAS has more than 200,000 members, many of whom are students.\nA student died while being forced into excessive alcohol consumption.""" input_dict = tokenizer_in(ARTICLE_STUDENTS, return_tensors="tf") output_ids = model.generate(input_ids=input_dict["input_ids"], max_length=None).numpy().tolist() summary = tokenizer_out.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS]) @require_tf class TFRoBertaEncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase): def setUp(self): self.encoder_model_tester = TFRobertaModelTester(self) self.decoder_model_tester = TFRobertaModelTester(self) def get_pretrained_model(self): return TFEncoderDecoderModel.from_encoder_decoder_pretrained( "hf-internal-testing/tiny-random-roberta", "hf-internal-testing/tiny-random-roberta", ) def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFRobertaModel(config, name="encoder") decoder_model = TFRobertaForCausalLM(decoder_config, name="decoder") return encoder_model, decoder_model def prepare_config_and_inputs(self): encoder_config_and_inputs = self.encoder_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = self.decoder_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "input_ids": input_ids, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "encoder_hidden_states": encoder_hidden_states, "labels": decoder_token_labels, } @require_tf class TFRembertEncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase): def setUp(self): self.encoder_model_tester = TFRemBertModelTester(self) self.decoder_model_tester = TFRemBertModelTester(self) def get_pretrained_model(self): return TFEncoderDecoderModel.from_encoder_decoder_pretrained( "hf-internal-testing/tiny-random-rembert", "hf-internal-testing/tiny-random-rembert", ) def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFRemBertModel(config, name="encoder") decoder_model = TFRemBertForCausalLM(decoder_config, name="decoder") return encoder_model, decoder_model def prepare_config_and_inputs(self): encoder_config_and_inputs = self.encoder_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = self.decoder_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = encoder_config_and_inputs ( 
decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "input_ids": input_ids, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "encoder_hidden_states": encoder_hidden_states, "labels": decoder_token_labels, } @require_tf class TFEncoderDecoderModelTest(unittest.TestCase): def get_from_encoderdecoder_pretrained_model(self): return TFEncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-cased", "bert-base-cased") def get_decoder_config(self): config = AutoConfig.from_pretrained("bert-base-cased") config.is_decoder = True config.add_cross_attention = True return config def get_encoderdecoder_model(self): return TFEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") def get_encoder_decoder_models(self): encoder_model = TFBertModel.from_pretrained("bert-base-cased", name="encoder") decoder_model = TFBertLMHeadModel.from_pretrained( "bert-base-cased", config=self.get_decoder_config(), name="decoder" ) return {"encoder": encoder_model, "decoder": decoder_model} def _check_configuration_tie(self, model): assert id(model.decoder.config) == id(model.config.decoder) assert id(model.encoder.config) == id(model.config.encoder) @slow def test_configuration_tie(self): model = self.get_from_encoderdecoder_pretrained_model() self._check_configuration_tie(model) model = TFEncoderDecoderModel(**self.get_encoder_decoder_models()) self._check_configuration_tie(model) # # This should be enabled once we upload the TF version of # # "patrickvonplaten/bert2bert-cnn_dailymail-fp16" to the Hub. 
# model = self.get_encoderdecoder_model() # self._check_configuration_tie(model) @require_tf class TFEncoderDecoderModelSaveLoadTests(unittest.TestCase): def get_encoder_decoder_config(self): encoder_config = AutoConfig.from_pretrained("bert-base-uncased") decoder_config = AutoConfig.from_pretrained("bert-base-uncased", is_decoder=True, add_cross_attention=True) return EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config) def get_encoder_decoder_config_small(self): encoder_config = AutoConfig.from_pretrained("hf-internal-testing/tiny-bert") decoder_config = AutoConfig.from_pretrained( "hf-internal-testing/tiny-bert", is_decoder=True, add_cross_attention=True ) return EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config) def test_encoder_decoder_save_load_from_encoder_decoder(self): config = self.get_encoder_decoder_config_small() # create two random BERT models for bert2bert & initialize weights (+cross_attention weights) encoder = TFBertModel(config.encoder) encoder(encoder.dummy_inputs) decoder = TFBertLMHeadModel(config.decoder) decoder(decoder.dummy_inputs) encoder_decoder_orig = TFEncoderDecoderModel(encoder=encoder, decoder=decoder) input_ids = ids_tensor([13, 5], encoder.config.vocab_size) decoder_input_ids = ids_tensor([13, 1], decoder.config.vocab_size) logits_orig = encoder_decoder_orig(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits with tempfile.TemporaryDirectory() as tmp_dirname: encoder_path = os.path.join(tmp_dirname, "encoder") decoder_path = os.path.join(tmp_dirname, "decoder") encoder.save_pretrained(encoder_path) decoder.save_pretrained(decoder_path) encoder_decoder = TFEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_path, decoder_path) logits_1 = encoder_decoder(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits self.assertTrue(logits_orig.numpy().sum() - logits_1.numpy().sum() < 1e-3) max_diff = np.max(np.abs(logits_1.numpy() - logits_orig.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=4) with tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder.save_pretrained(tmp_dirname) encoder_decoder = TFEncoderDecoderModel.from_pretrained(tmp_dirname) logits_2 = encoder_decoder(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_2.numpy() - logits_orig.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=4) @require_torch @is_pt_tf_cross_test def test_encoder_decoder_save_load_from_encoder_decoder_from_pt(self): config = self.get_encoder_decoder_config_small() # create two random BERT models for bert2bert & initialize weights (+cross_attention weights) encoder_pt = BertModel(config.encoder).to(torch_device).eval() decoder_pt = BertLMHeadModel(config.decoder).to(torch_device).eval() encoder_decoder_pt = EncoderDecoderModel(encoder=encoder_pt, decoder=decoder_pt).to(torch_device).eval() input_ids = ids_tensor([13, 5], encoder_pt.config.vocab_size) decoder_input_ids = ids_tensor([13, 1], decoder_pt.config.vocab_size) pt_input_ids = torch.tensor(input_ids.numpy(), device=torch_device, dtype=torch.long) pt_decoder_input_ids = torch.tensor(decoder_input_ids.numpy(), device=torch_device, dtype=torch.long) logits_pt = encoder_decoder_pt(input_ids=pt_input_ids, decoder_input_ids=pt_decoder_input_ids).logits # PyTorch => TensorFlow with tempfile.TemporaryDirectory() as tmp_dirname_1, tempfile.TemporaryDirectory() as tmp_dirname_2: encoder_decoder_pt.encoder.save_pretrained(tmp_dirname_1) 
encoder_decoder_pt.decoder.save_pretrained(tmp_dirname_2) encoder_decoder_tf = TFEncoderDecoderModel.from_encoder_decoder_pretrained( tmp_dirname_1, tmp_dirname_2, encoder_from_pt=True, decoder_from_pt=True ) logits_tf = encoder_decoder_tf(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_pt.detach().cpu().numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) # Make sure `from_pretrained` following `save_pretrained` work and give the same result with tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder_tf.save_pretrained(tmp_dirname) encoder_decoder_tf = TFEncoderDecoderModel.from_pretrained(tmp_dirname) logits_tf_2 = encoder_decoder_tf(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_tf_2.numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) # TensorFlow => PyTorch with tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder_tf.save_pretrained(tmp_dirname) encoder_decoder_pt = EncoderDecoderModel.from_pretrained(tmp_dirname, from_tf=True) max_diff = np.max(np.abs(logits_pt.detach().cpu().numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) @slow def test_encoder_decoder_from_pretrained(self): load_weight_prefix = TFEncoderDecoderModel.load_weight_prefix config = self.get_encoder_decoder_config() encoder_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") decoder_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") input_ids = encoder_tokenizer("who sings does he love me with reba", return_tensors="tf").input_ids decoder_input_ids = decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids with tempfile.TemporaryDirectory() as tmp_dirname: # Since most of HF's models don't have pretrained cross-attention layers, they are randomly # initialized even if we create models using `from_pretrained` method. # For the tests, the decoder need to be a model with pretrained cross-attention layers. # So we create pretrained models (without `load_weight_prefix`), save them, and later, # we load them using `from_pretrained`. # (we don't need to do this for encoder, but let's make the code more similar between encoder/decoder) encoder = TFAutoModel.from_pretrained("bert-base-uncased", name="encoder") # It's necessary to specify `add_cross_attention=True` here. 
decoder = TFAutoModelForCausalLM.from_pretrained( "bert-base-uncased", is_decoder=True, add_cross_attention=True, name="decoder" ) pretrained_encoder_dir = os.path.join(tmp_dirname, "pretrained_encoder") pretrained_decoder_dir = os.path.join(tmp_dirname, "pretrained_decoder") encoder.save_pretrained(pretrained_encoder_dir) decoder.save_pretrained(pretrained_decoder_dir) del encoder del decoder enc_dec_model = TFEncoderDecoderModel.from_encoder_decoder_pretrained( pretrained_encoder_dir, pretrained_decoder_dir, ) # check that the from pretrained methods work enc_dec_model.save_pretrained(tmp_dirname) enc_dec_model = TFEncoderDecoderModel.from_pretrained(tmp_dirname) output = enc_dec_model(input_ids, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) loss_pretrained = output.loss del enc_dec_model # Create the model using `__init__` with loaded ``pretrained`` encoder / decoder encoder = TFAutoModel.from_pretrained( pretrained_encoder_dir, load_weight_prefix=load_weight_prefix, name="encoder" ) decoder = TFAutoModelForCausalLM.from_pretrained( pretrained_decoder_dir, load_weight_prefix=load_weight_prefix, name="decoder" ) enc_dec_model = TFEncoderDecoderModel(config=config, encoder=encoder, decoder=decoder) output = enc_dec_model(input_ids, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) loss_init = output.loss max_diff = np.max(np.abs(loss_pretrained - loss_init)) expected_diff = 0.0 self.assertAlmostEqual(max_diff, expected_diff, places=4)
# coding=utf-8 # Copyright 2020 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import tempfile import unittest import numpy as np from transformers import is_tf_available, is_torch_available from transformers.testing_utils import is_pt_tf_cross_test, require_tf, require_torch, slow, torch_device from transformers.utils.generic import ModelOutput from ...test_modeling_tf_common import ids_tensor from ..bert.test_modeling_tf_bert import TFBertModelTester from ..gpt2.test_modeling_tf_gpt2 import TFGPT2ModelTester from ..rembert.test_modeling_tf_rembert import TFRemBertModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester if is_tf_available(): import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EncoderDecoderConfig, TFAutoModel, TFAutoModelForCausalLM, TFBertLMHeadModel, TFBertModel, TFEncoderDecoderModel, TFGPT2LMHeadModel, TFRemBertForCausalLM, TFRemBertModel, TFRobertaForCausalLM, TFRobertaModel, ) from transformers.modeling_tf_outputs import TFBaseModelOutput if is_torch_available(): import torch from transformers import BertLMHeadModel, BertModel, EncoderDecoderModel @require_tf class TFEncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): raise NotImplementedError def prepare_config_and_inputs(self): raise NotImplementedError def get_pretrained_model(self): raise NotImplementedError def check_encoder_decoder_model_from_pretrained_configs( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs ): encoder_decoder_config = EncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = TFEncoderDecoderModel(encoder_decoder_config) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual( outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,)) ) def check_encoder_decoder_model( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) 
self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual( outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,)) ) encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_hidden_states) outputs_encoder_decoder = enc_dec_model( input_ids=None, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual( outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,)) ) def check_encoder_decoder_model_from_pretrained( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, return_dict=True, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual( outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,)) ) def check_save_and_load( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) enc_dec_model = TFEncoderDecoderModel.from_pretrained(tmpdirname) after_outputs = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_encoder_decoder_model_labels( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, labels, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, labels=labels, kwargs=kwargs, ) # Make sure `loss` exist self.assertIn("loss", outputs_encoder_decoder) batch_size, seq_len = decoder_input_ids.shape expected_shape = (batch_size, seq_len, decoder_config.vocab_size) self.assertEqual(outputs_encoder_decoder["logits"].shape, 
expected_shape) self.assertEqual( outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,)) ) def _check_output_with_attentions( self, outputs_encoder_decoder, config, input_ids, decoder_config, decoder_input_ids ): encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) self.assertEqual( encoder_attentions[0].shape[-3:], (config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] * ( 1 + (decoder_config.ngram if hasattr(decoder_config, "ngram") else 0) ) self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, input_ids.shape[-1]), ) def check_encoder_decoder_model_output_attentions( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_attentions=True, kwargs=kwargs, ) self._check_output_with_attentions( outputs_encoder_decoder, config, input_ids, decoder_config, decoder_input_ids ) def check_encoder_decoder_model_output_attentions_from_config( self, config, input_ids, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs ): # Similar to `check_encoder_decoder_model_output_attentions`, but with `output_attentions` triggered from the # config file. Contrarily to most models, changing the model's config won't work -- the defaults are loaded # from the inner models' configurations. 
decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.config.output_attentions = True # model config -> won't work outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertTrue( all( key not in outputs_encoder_decoder for key in ["encoder_attentions", "decoder_attentions", "cross_attentions"] ) ) config.output_attentions = True # inner model config -> will work decoder_config.output_attentions = True encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self._check_output_with_attentions( outputs_encoder_decoder, config, input_ids, decoder_config, decoder_input_ids ) def check_encoder_decoder_model_generate(self, input_ids, config, decoder_config, **kwargs): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) # Generate until max length if hasattr(enc_dec_model.config, "eos_token_id"): enc_dec_model.config.eos_token_id = None if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"): enc_dec_model.config.decoder.eos_token_id = None # Bert does not have a bos token id, so use pad_token_id instead generated_output = enc_dec_model.generate( input_ids, decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id ) self.assertEqual(tuple(generated_output.shape.as_list()), (input_ids.shape[0],) + (decoder_config.max_length,)) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way. Args: model_class: The class of the model that is currently being tested. For example, `TFBertModel`, `TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative error messages. name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc. attributes (`Tuple[str]`): The names of the output's elements if the output is a tuple/list with each element being a named field in the output. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). 
if isinstance(tf_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should be an instance of `ModelOutput` when `tf_outputs` is", ) tf_keys = [k for k, v in tf_outputs.items() if v is not None] pt_keys = [k for k, v in pt_outputs.items() if v is not None] self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in tf_keys]) self.check_pt_tf_outputs( tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(tf_outputs) in [tuple, list]: self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch") self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch") if attributes is not None: # case 1: each output has an assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(tf_outputs), f"{name}: The tuple `attributes` should have the same length as `tf_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `attributes` attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))]) for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes): self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(tf_outputs, tf.Tensor): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `tf_outputs` is" ) tf_outputs = tf_outputs.numpy() pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch" ) # deal with NumPy's scalars to make replacing nan values with 0 work. if np.isscalar(tf_outputs): tf_outputs = np.array([tf_outputs]) pt_outputs = np.array([pt_outputs]) tf_nans = np.isnan(tf_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[tf_nans] = 0 tf_outputs[tf_nans] = 0 pt_outputs[pt_nans] = 0 tf_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).") else: raise ValueError( "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`/`list`, or an instance of `tf.Tensor`. Got" f" {type(tf_outputs)} instead." 
) def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): if type(key) == bool: pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "pixel_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "input_features": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) # other general float inputs elif tf_inputs_dict[name].dtype.is_floating: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) else: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long) return pt_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict): pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict) # send pytorch inputs to the correct device pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } # send pytorch model to the correct device pt_model.to(torch_device) # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) # tf models returned loss is usually a tensor rather than a scalar. # (see `hf_compute_loss`: it uses `tf.keras.losses.Reduction.NONE`) # Change it here to a scalar to match PyTorch models' loss tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model)) def check_pt_tf_equivalence(self, tf_model, pt_model, tf_inputs_dict): """Wrap `check_pt_tf_models` to further check PT -> TF again""" self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # PT -> TF with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) tf_model = TFEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) def check_pt_to_tf_equivalence(self, config, decoder_config, tf_inputs_dict): """EncoderDecoderModel requires special way to cross load (PT -> TF)""" encoder_decoder_config = EncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) # Output all for aggressive testing encoder_decoder_config.output_hidden_states = True # All models tested in this file have attentions encoder_decoder_config.output_attentions = True pt_model = EncoderDecoderModel(encoder_decoder_config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) tf_model = TFEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) def check_tf_to_pt_equivalence(self, config, decoder_config, tf_inputs_dict): """EncoderDecoderModel requires special way to cross load (TF -> PT)""" encoder_decoder_config = EncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) # Output all for aggressive testing encoder_decoder_config.output_hidden_states = True # TODO: A generalizable way to determine this attribute encoder_decoder_config.output_attentions = True tf_model = TFEncoderDecoderModel(encoder_decoder_config) # Make sure model is built before saving tf_model(**tf_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: tf_model.save_pretrained(tmpdirname) pt_model = EncoderDecoderModel.from_pretrained(tmpdirname, 
from_tf=True) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) def test_encoder_decoder_model(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_from_pretrained_configs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True) def test_save_and_load_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load(**input_ids_dict) def test_encoder_decoder_model_labels(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_labels(**input_ids_dict) def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) def test_encoder_decoder_model_output_attentions_from_config(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions_from_config(**input_ids_dict) def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and tf is {diff} (>= {tol}).") @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() labels = config_inputs_dict.pop("decoder_token_labels") # Keep only common arguments arg_names = [ "config", "input_ids", "attention_mask", "decoder_config", "decoder_input_ids", "decoder_attention_mask", "encoder_hidden_states", ] config_inputs_dict = {k: v for k, v in config_inputs_dict.items() if k in arg_names} config = config_inputs_dict.pop("config") decoder_config = config_inputs_dict.pop("decoder_config") # Output all for aggressive testing config.output_hidden_states = True decoder_config.output_hidden_states = True # All models tested in this file have attentions config.output_attentions = True decoder_config.output_attentions = True tf_inputs_dict = config_inputs_dict # `encoder_hidden_states` is not used in model call/forward del tf_inputs_dict["encoder_hidden_states"] # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. for k in ["attention_mask", "decoder_attention_mask"]: attention_mask = tf_inputs_dict[k] # Make sure no all 0s attention masks - to avoid failure at this moment. # Put `1` at the beginning of sequences to make it still work when combining causal attention masks. # TODO: remove this line once a fix regarding large negative values for attention mask is done. 
attention_mask = tf.concat( [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1 ) tf_inputs_dict[k] = attention_mask tf_inputs_dict_with_labels = copy.copy(tf_inputs_dict) tf_inputs_dict_with_labels["labels"] = labels self.assertTrue(decoder_config.cross_attention_hidden_size is None) # Original test: check without `labels` and without `enc_to_dec_proj` projection self.assertTrue(config.hidden_size == decoder_config.hidden_size) self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict) # check with `labels` self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict_with_labels) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict_with_labels) # check `enc_to_dec_proj` work as expected decoder_config.hidden_size = decoder_config.hidden_size * 2 self.assertTrue(config.hidden_size != decoder_config.hidden_size) self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict) def test_model_save_load_from_pretrained(self): model_2 = self.get_pretrained_model() input_ids = ids_tensor([13, 5], model_2.config.encoder.vocab_size) decoder_input_ids = ids_tensor([13, 1], model_2.config.decoder.vocab_size) attention_mask = ids_tensor([13, 5], vocab_size=2) outputs = model_2( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = TFEncoderDecoderModel.from_pretrained(tmp_dirname) after_outputs = model_1( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_tf class TFBertEncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase): def setUp(self): self.encoder_model_tester = TFBertModelTester(self, batch_size=13) self.decoder_model_tester = TFBertModelTester(self, batch_size=13) def get_pretrained_model(self): return TFEncoderDecoderModel.from_encoder_decoder_pretrained( "hf-internal-testing/tiny-random-bert", "hf-internal-testing/tiny-random-bert", ) def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFBertModel(config, name="encoder") decoder_model = TFBertLMHeadModel(decoder_config, name="decoder") return encoder_model, decoder_model def prepare_config_and_inputs(self): encoder_config_and_inputs = self.encoder_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = self.decoder_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_ids, token_type_ids, attention_mask, sequence_labels, token_labels, choice_labels, ) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_attention_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "input_ids": input_ids, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, 
"decoder_attention_mask": decoder_attention_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "encoder_hidden_states": encoder_hidden_states, "labels": decoder_token_labels, } @slow @is_pt_tf_cross_test def test_bert2bert_summarization(self): from transformers import EncoderDecoderModel tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") """Not working, because pt checkpoint has `encoder.encoder.layer...` while tf model has `encoder.bert.encoder.layer...`. (For Bert decoder, there is no issue, because `BertModel` is wrapped into `decoder` as `bert`) model = TFEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16", from_pt=True) """ # workaround to load from pt _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") _model.encoder.save_pretrained("./encoder") _model.decoder.save_pretrained("./decoder") model = TFEncoderDecoderModel.from_encoder_decoder_pretrained( "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True ) model.config = _model.config ARTICLE_STUDENTS = """(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. "As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world," Cohen said. 
Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.""" EXPECTED_SUMMARY_STUDENTS = """sae was founded in 1856, five years before the civil war. the fraternity has had to work hard to change recently. the university of oklahoma president says the university's affiliation with the fraternity is permanently done. the sae has had a string of members in recent months.""" input_dict = tokenizer(ARTICLE_STUDENTS, return_tensors="tf") output_ids = model.generate(input_ids=input_dict["input_ids"], max_length=None).numpy().tolist() summary = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS]) # Test with the TF checkpoint model = TFEncoderDecoderModel.from_pretrained("ydshieh/bert2bert-cnn_dailymail-fp16") output_ids = model.generate(input_ids=input_dict["input_ids"], max_length=None).numpy().tolist() summary = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS]) @require_tf class TFGPT2EncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase): def setUp(self): self.encoder_model_tester = TFBertModelTester(self, batch_size=13) self.decoder_model_tester = TFGPT2ModelTester(self) def get_pretrained_model(self): return TFEncoderDecoderModel.from_encoder_decoder_pretrained( "hf-internal-testing/tiny-random-bert", "hf-internal-testing/tiny-random-gpt2", ) def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFBertModel(config, name="encoder") decoder_model = TFGPT2LMHeadModel(decoder_config, name="decoder") return encoder_model, decoder_model def prepare_config_and_inputs(self): encoder_config_and_inputs = self.encoder_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = self.decoder_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_ids, token_type_ids, attention_mask, sequence_labels, token_labels, choice_labels, ) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, decoder_head_mask, decoder_token_type_ids, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "input_ids": input_ids, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_attention_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "encoder_hidden_states": encoder_hidden_states, "labels": decoder_token_labels, } @slow @is_pt_tf_cross_test def test_bert2gpt2_summarization(self): from transformers import EncoderDecoderModel tokenizer_in = AutoTokenizer.from_pretrained("bert-base-cased") tokenizer_out = AutoTokenizer.from_pretrained("gpt2") """Not working, because pt checkpoint has `encoder.encoder.layer...` while tf model has `encoder.bert.encoder.layer...`. 
(For GPT2 decoder, there is no issue) model = TFEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16", from_pt=True) """ # workaround to load from pt _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16") _model.encoder.save_pretrained("./encoder") _model.decoder.save_pretrained("./decoder") model = TFEncoderDecoderModel.from_encoder_decoder_pretrained( "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True ) model.config = _model.config ARTICLE_STUDENTS = """(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. "As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world," Cohen said. 
Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.""" EXPECTED_SUMMARY_STUDENTS = """SAS Alpha Epsilon suspended the students, but university president says it's permanent.\nThe fraternity has had to deal with a string of student deaths since 2010.\nSAS has more than 200,000 members, many of whom are students.\nA student died while being forced into excessive alcohol consumption.""" input_dict = tokenizer_in(ARTICLE_STUDENTS, return_tensors="tf") output_ids = model.generate(input_ids=input_dict["input_ids"], max_length=None).numpy().tolist() summary = tokenizer_out.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS]) @require_tf class TFRoBertaEncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase): def setUp(self): self.encoder_model_tester = TFRobertaModelTester(self) self.decoder_model_tester = TFRobertaModelTester(self) def get_pretrained_model(self): return TFEncoderDecoderModel.from_encoder_decoder_pretrained( "hf-internal-testing/tiny-random-roberta", "hf-internal-testing/tiny-random-roberta", ) def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFRobertaModel(config, name="encoder") decoder_model = TFRobertaForCausalLM(decoder_config, name="decoder") return encoder_model, decoder_model def prepare_config_and_inputs(self): encoder_config_and_inputs = self.encoder_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = self.decoder_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "input_ids": input_ids, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "encoder_hidden_states": encoder_hidden_states, "labels": decoder_token_labels, } @require_tf class TFRembertEncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase): def setUp(self): self.encoder_model_tester = TFRemBertModelTester(self) self.decoder_model_tester = TFRemBertModelTester(self) def get_pretrained_model(self): return TFEncoderDecoderModel.from_encoder_decoder_pretrained( "hf-internal-testing/tiny-random-rembert", "hf-internal-testing/tiny-random-rembert", ) def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFRemBertModel(config, name="encoder") decoder_model = TFRemBertForCausalLM(decoder_config, name="decoder") return encoder_model, decoder_model def prepare_config_and_inputs(self): encoder_config_and_inputs = self.encoder_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = self.decoder_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = encoder_config_and_inputs ( 
decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "input_ids": input_ids, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "encoder_hidden_states": encoder_hidden_states, "labels": decoder_token_labels, } @require_tf class TFEncoderDecoderModelTest(unittest.TestCase): def get_from_encoderdecoder_pretrained_model(self): return TFEncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-cased", "bert-base-cased") def get_decoder_config(self): config = AutoConfig.from_pretrained("bert-base-cased") config.is_decoder = True config.add_cross_attention = True return config def get_encoderdecoder_model(self): return TFEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") def get_encoder_decoder_models(self): encoder_model = TFBertModel.from_pretrained("bert-base-cased", name="encoder") decoder_model = TFBertLMHeadModel.from_pretrained( "bert-base-cased", config=self.get_decoder_config(), name="decoder" ) return {"encoder": encoder_model, "decoder": decoder_model} def _check_configuration_tie(self, model): assert id(model.decoder.config) == id(model.config.decoder) assert id(model.encoder.config) == id(model.config.encoder) @slow def test_configuration_tie(self): model = self.get_from_encoderdecoder_pretrained_model() self._check_configuration_tie(model) model = TFEncoderDecoderModel(**self.get_encoder_decoder_models()) self._check_configuration_tie(model) # # This should be enabled once we upload the TF version of # # "patrickvonplaten/bert2bert-cnn_dailymail-fp16" to the Hub. 
# model = self.get_encoderdecoder_model() # self._check_configuration_tie(model) @require_tf class TFEncoderDecoderModelSaveLoadTests(unittest.TestCase): def get_encoder_decoder_config(self): encoder_config = AutoConfig.from_pretrained("bert-base-uncased") decoder_config = AutoConfig.from_pretrained("bert-base-uncased", is_decoder=True, add_cross_attention=True) return EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config) def get_encoder_decoder_config_small(self): encoder_config = AutoConfig.from_pretrained("hf-internal-testing/tiny-bert") decoder_config = AutoConfig.from_pretrained( "hf-internal-testing/tiny-bert", is_decoder=True, add_cross_attention=True ) return EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config) def test_encoder_decoder_save_load_from_encoder_decoder(self): config = self.get_encoder_decoder_config_small() # create two random BERT models for bert2bert & initialize weights (+cross_attention weights) encoder = TFBertModel(config.encoder) encoder(encoder.dummy_inputs) decoder = TFBertLMHeadModel(config.decoder) decoder(decoder.dummy_inputs) encoder_decoder_orig = TFEncoderDecoderModel(encoder=encoder, decoder=decoder) input_ids = ids_tensor([13, 5], encoder.config.vocab_size) decoder_input_ids = ids_tensor([13, 1], decoder.config.vocab_size) logits_orig = encoder_decoder_orig(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits with tempfile.TemporaryDirectory() as tmp_dirname: encoder_path = os.path.join(tmp_dirname, "encoder") decoder_path = os.path.join(tmp_dirname, "decoder") encoder.save_pretrained(encoder_path) decoder.save_pretrained(decoder_path) encoder_decoder = TFEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_path, decoder_path) logits_1 = encoder_decoder(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits self.assertTrue(logits_orig.numpy().sum() - logits_1.numpy().sum() < 1e-3) max_diff = np.max(np.abs(logits_1.numpy() - logits_orig.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=4) with tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder.save_pretrained(tmp_dirname) encoder_decoder = TFEncoderDecoderModel.from_pretrained(tmp_dirname) logits_2 = encoder_decoder(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_2.numpy() - logits_orig.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=4) @require_torch @is_pt_tf_cross_test def test_encoder_decoder_save_load_from_encoder_decoder_from_pt(self): config = self.get_encoder_decoder_config_small() # create two random BERT models for bert2bert & initialize weights (+cross_attention weights) encoder_pt = BertModel(config.encoder).to(torch_device).eval() decoder_pt = BertLMHeadModel(config.decoder).to(torch_device).eval() encoder_decoder_pt = EncoderDecoderModel(encoder=encoder_pt, decoder=decoder_pt).to(torch_device).eval() input_ids = ids_tensor([13, 5], encoder_pt.config.vocab_size) decoder_input_ids = ids_tensor([13, 1], decoder_pt.config.vocab_size) pt_input_ids = torch.tensor(input_ids.numpy(), device=torch_device, dtype=torch.long) pt_decoder_input_ids = torch.tensor(decoder_input_ids.numpy(), device=torch_device, dtype=torch.long) logits_pt = encoder_decoder_pt(input_ids=pt_input_ids, decoder_input_ids=pt_decoder_input_ids).logits # PyTorch => TensorFlow with tempfile.TemporaryDirectory() as tmp_dirname_1, tempfile.TemporaryDirectory() as tmp_dirname_2: encoder_decoder_pt.encoder.save_pretrained(tmp_dirname_1) 
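            # The encoder and decoder are exported separately on purpose: `from_encoder_decoder_pretrained`
            # below cross-loads each sub-model from its PyTorch weights via `encoder_from_pt` / `decoder_from_pt`.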
encoder_decoder_pt.decoder.save_pretrained(tmp_dirname_2) encoder_decoder_tf = TFEncoderDecoderModel.from_encoder_decoder_pretrained( tmp_dirname_1, tmp_dirname_2, encoder_from_pt=True, decoder_from_pt=True ) logits_tf = encoder_decoder_tf(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_pt.detach().cpu().numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) # Make sure `from_pretrained` following `save_pretrained` work and give the same result with tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder_tf.save_pretrained(tmp_dirname) encoder_decoder_tf = TFEncoderDecoderModel.from_pretrained(tmp_dirname) logits_tf_2 = encoder_decoder_tf(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_tf_2.numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) # TensorFlow => PyTorch with tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder_tf.save_pretrained(tmp_dirname) encoder_decoder_pt = EncoderDecoderModel.from_pretrained(tmp_dirname, from_tf=True) max_diff = np.max(np.abs(logits_pt.detach().cpu().numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) @slow def test_encoder_decoder_from_pretrained(self): load_weight_prefix = TFEncoderDecoderModel.load_weight_prefix config = self.get_encoder_decoder_config() encoder_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") decoder_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") input_ids = encoder_tokenizer("who sings does he love me with reba", return_tensors="tf").input_ids decoder_input_ids = decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids with tempfile.TemporaryDirectory() as tmp_dirname: # Since most of HF's models don't have pretrained cross-attention layers, they are randomly # initialized even if we create models using `from_pretrained` method. # For the tests, the decoder need to be a model with pretrained cross-attention layers. # So we create pretrained models (without `load_weight_prefix`), save them, and later, # we load them using `from_pretrained`. # (we don't need to do this for encoder, but let's make the code more similar between encoder/decoder) encoder = TFAutoModel.from_pretrained("bert-base-uncased", name="encoder") # It's necessary to specify `add_cross_attention=True` here. 
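            # Without it, `TFAutoModelForCausalLM` would build a decoder with no cross-attention layers, so it
            # could not attend to the encoder's hidden states once wrapped in `TFEncoderDecoderModel`.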
decoder = TFAutoModelForCausalLM.from_pretrained( "bert-base-uncased", is_decoder=True, add_cross_attention=True, name="decoder" ) pretrained_encoder_dir = os.path.join(tmp_dirname, "pretrained_encoder") pretrained_decoder_dir = os.path.join(tmp_dirname, "pretrained_decoder") encoder.save_pretrained(pretrained_encoder_dir) decoder.save_pretrained(pretrained_decoder_dir) del encoder del decoder enc_dec_model = TFEncoderDecoderModel.from_encoder_decoder_pretrained( pretrained_encoder_dir, pretrained_decoder_dir, ) # check that the from pretrained methods work enc_dec_model.save_pretrained(tmp_dirname) enc_dec_model = TFEncoderDecoderModel.from_pretrained(tmp_dirname) output = enc_dec_model(input_ids, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) loss_pretrained = output.loss del enc_dec_model # Create the model using `__init__` with loaded ``pretrained`` encoder / decoder encoder = TFAutoModel.from_pretrained( pretrained_encoder_dir, load_weight_prefix=load_weight_prefix, name="encoder" ) decoder = TFAutoModelForCausalLM.from_pretrained( pretrained_decoder_dir, load_weight_prefix=load_weight_prefix, name="decoder" ) enc_dec_model = TFEncoderDecoderModel(config=config, encoder=encoder, decoder=decoder) output = enc_dec_model(input_ids, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) loss_init = output.loss max_diff = np.max(np.abs(loss_pretrained - loss_init)) expected_diff = 0.0 self.assertAlmostEqual(max_diff, expected_diff, places=4)
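For reference, a minimal sketch of the PyTorch-to-TensorFlow cross-loading pattern that the tests above rely on (the checkpoint id is the one used in the tests; the temporary-directory layout is illustrative only, and both the PyTorch and TensorFlow backends of `transformers` are assumed to be installed):

import tempfile

from transformers import EncoderDecoderModel, TFEncoderDecoderModel

# Start from a PyTorch-only encoder-decoder checkpoint.
pt_model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16")

with tempfile.TemporaryDirectory() as tmp_dir:
    # Export the two sub-models separately, then cross-load each one from its PyTorch weights.
    pt_model.encoder.save_pretrained(f"{tmp_dir}/encoder")
    pt_model.decoder.save_pretrained(f"{tmp_dir}/decoder")
    tf_model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(
        f"{tmp_dir}/encoder", f"{tmp_dir}/decoder", encoder_from_pt=True, decoder_from_pt=True
    )
    # Carry over the composite config, as the tests do.
    tf_model.config = pt_model.config

Saving into a temporary directory keeps the sketch self-contained; the tests use the same pattern with explicit ./encoder and ./decoder folders.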
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/models/x_clip/__init__.py
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./docs/source/en/community.mdx
# Community This page regroups resources around 🤗 Transformers developed by the community. ## Community resources: | Resource | Description | Author | |:----------|:-------------|------:| | [Hugging Face Transformers Glossary Flashcards](https://www.darigovresearch.com/huggingface-transformers-glossary-flashcards) | A set of flashcards based on the [Transformers Docs Glossary](glossary) that has been put into a form which can be easily learnt/revised using [Anki ](https://apps.ankiweb.net/) an open source, cross platform app specifically designed for long term knowledge retention. See this [Introductory video on how to use the flashcards](https://www.youtube.com/watch?v=Dji_h7PILrw). | [Darigov Research](https://www.darigovresearch.com/) | ## Community notebooks: | Notebook | Description | Author | | |:----------|:-------------|:-------------|------:| | [Fine-tune a pre-trained Transformer to generate lyrics](https://github.com/AlekseyKorshuk/huggingartists) | How to generate lyrics in the style of your favorite artist by fine-tuning a GPT-2 model | [Aleksey Korshuk](https://github.com/AlekseyKorshuk) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AlekseyKorshuk/huggingartists/blob/master/huggingartists-demo.ipynb) | | [Train T5 in Tensorflow 2 ](https://github.com/snapthat/TF-T5-text-to-text) | How to train T5 for any task using Tensorflow 2. This notebook demonstrates a Question & Answer task implemented in Tensorflow 2 using SQUAD | [Muhammad Harris](https://github.com/HarrisDePerceptron) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/snapthat/TF-T5-text-to-text/blob/master/snapthatT5/notebooks/TF-T5-Datasets%20Training.ipynb) | | [Train T5 on TPU](https://github.com/patil-suraj/exploring-T5/blob/master/T5_on_TPU.ipynb) | How to train T5 on SQUAD with Transformers and Nlp | [Suraj Patil](https://github.com/patil-suraj) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patil-suraj/exploring-T5/blob/master/T5_on_TPU.ipynb#scrollTo=QLGiFCDqvuil) | | [Fine-tune T5 for Classification and Multiple Choice](https://github.com/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) | How to fine-tune T5 for classification and multiple choice tasks using a text-to-text format with PyTorch Lightning | [Suraj Patil](https://github.com/patil-suraj) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) | | [Fine-tune DialoGPT on New Datasets and Languages](https://github.com/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) | How to fine-tune the DialoGPT model on a new dataset for open-dialog conversational chatbots | [Nathan Cooper](https://github.com/ncoop57) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) | | [Long Sequence Modeling with Reformer](https://github.com/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) | How to train on sequences as long as 500,000 tokens with Reformer | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) | | [Fine-tune BART for Summarization](https://github.com/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb) | How to fine-tune BART for summarization with fastai using blurr | [Wayde Gilliam](https://ohmeow.com/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb) | | [Fine-tune a pre-trained Transformer on anyone's tweets](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) | How to generate tweets in the style of your favorite Twitter account by fine-tuning a GPT-2 model | [Boris Dayma](https://github.com/borisdayma) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) | | [Optimize 🤗 Hugging Face models with Weights & Biases](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) | A complete tutorial showcasing W&B integration with Hugging Face | [Boris Dayma](https://github.com/borisdayma) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) | | [Pretrain Longformer](https://github.com/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) | How to build a "long" version of existing pretrained models | [Iz Beltagy](https://beltagy.net) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) | | [Fine-tune Longformer for QA](https://github.com/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb) | How to fine-tune longformer model for QA task | [Suraj Patil](https://github.com/patil-suraj) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb) | | [Evaluate Model with 🤗nlp](https://github.com/patrickvonplaten/notebooks/blob/master/How_to_evaluate_Longformer_on_TriviaQA_using_NLP.ipynb) | How to evaluate longformer on TriviaQA with `nlp` | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1m7eTGlPmLRgoPkkA7rkhQdZ9ydpmsdLE?usp=sharing) | | [Fine-tune T5 for Sentiment Span Extraction](https://github.com/enzoampil/t5-intro/blob/master/t5_qa_training_pytorch_span_extraction.ipynb) | How to fine-tune T5 for sentiment span extraction using a text-to-text format with PyTorch Lightning | [Lorenzo Ampil](https://github.com/enzoampil) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/enzoampil/t5-intro/blob/master/t5_qa_training_pytorch_span_extraction.ipynb) | | [Fine-tune DistilBert for Multiclass 
Classification](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb) | How to fine-tune DistilBert for multiclass classification with PyTorch | [Abhishek Kumar Mishra](https://github.com/abhimishra91) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb)| |[Fine-tune BERT for Multi-label Classification](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)|How to fine-tune BERT for multi-label classification using PyTorch|[Abhishek Kumar Mishra](https://github.com/abhimishra91) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)| |[Fine-tune T5 for Summarization](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb)|How to fine-tune T5 for summarization in PyTorch and track experiments with WandB|[Abhishek Kumar Mishra](https://github.com/abhimishra91) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb)| |[Speed up Fine-Tuning in Transformers with Dynamic Padding / Bucketing](https://github.com/ELS-RD/transformers-notebook/blob/master/Divide_Hugging_Face_Transformers_training_time_by_2_or_more.ipynb)|How to speed up fine-tuning by a factor of 2 using dynamic padding / bucketing|[Michael Benesty](https://github.com/pommedeterresautee) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1CBfRU1zbfu7-ijiOqAAQUA-RJaxfcJoO?usp=sharing)| |[Pretrain Reformer for Masked Language Modeling](https://github.com/patrickvonplaten/notebooks/blob/master/Reformer_For_Masked_LM.ipynb)| How to train a Reformer model with bi-directional self-attention layers | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1tzzh0i8PgDQGV3SMFUGxM7_gGae3K-uW?usp=sharing)| |[Expand and Fine Tune Sci-BERT](https://github.com/lordtt13/word-embeddings/blob/master/COVID-19%20Research%20Data/COVID-SciBERT.ipynb)| How to increase vocabulary of a pretrained SciBERT model from AllenAI on the CORD dataset and pipeline it. | [Tanmay Thakur](https://github.com/lordtt13) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1rqAR40goxbAfez1xvF3hBJphSCsvXmh8)| |[Fine Tune BlenderBotSmall for Summarization using the Trainer API](https://github.com/lordtt13/transformers-experiments/blob/master/Custom%20Tasks/fine-tune-blenderbot_small-for-summarization.ipynb)| How to fine tune BlenderBotSmall for summarization on a custom dataset, using the Trainer API. 
| [Tanmay Thakur](https://github.com/lordtt13) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/19Wmupuls7mykSGyRN_Qo6lPQhgp56ymq?usp=sharing)| |[Fine-tune Electra and interpret with Integrated Gradients](https://github.com/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb) | How to fine-tune Electra for sentiment analysis and interpret predictions with Captum Integrated Gradients | [Eliza Szczechla](https://elsanns.github.io) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb)| |[fine-tune a non-English GPT-2 Model with Trainer class](https://github.com/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb) | How to fine-tune a non-English GPT-2 Model with Trainer class | [Philipp Schmid](https://www.philschmid.de) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb)| |[Fine-tune a DistilBERT Model for Multi Label Classification task](https://github.com/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb) | How to fine-tune a DistilBERT Model for Multi Label Classification task | [Dhaval Taunk](https://github.com/DhavalTaunk08) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb)| |[Fine-tune ALBERT for sentence-pair classification](https://github.com/NadirEM/nlp-notebooks/blob/master/Fine_tune_ALBERT_sentence_pair_classification.ipynb) | How to fine-tune an ALBERT model or another BERT-based model for the sentence-pair classification task | [Nadir El Manouzi](https://github.com/NadirEM) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NadirEM/nlp-notebooks/blob/master/Fine_tune_ALBERT_sentence_pair_classification.ipynb)| |[Fine-tune Roberta for sentiment analysis](https://github.com/DhavalTaunk08/NLP_scripts/blob/master/sentiment_analysis_using_roberta.ipynb) | How to fine-tune a Roberta model for sentiment analysis | [Dhaval Taunk](https://github.com/DhavalTaunk08) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DhavalTaunk08/NLP_scripts/blob/master/sentiment_analysis_using_roberta.ipynb)| |[Evaluating Question Generation Models](https://github.com/flexudy-pipe/qugeev) | How accurate are the answers to questions generated by your seq2seq transformer model? 
| [Pascal Zoleko](https://github.com/zolekode) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1bpsSqCQU-iw_5nNoRm_crPq6FRuJthq_?usp=sharing)| |[Classify text with DistilBERT and Tensorflow](https://github.com/peterbayerle/huggingface_notebook/blob/main/distilbert_tf.ipynb) | How to fine-tune DistilBERT for text classification in TensorFlow | [Peter Bayerle](https://github.com/peterbayerle) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/peterbayerle/huggingface_notebook/blob/main/distilbert_tf.ipynb)| |[Leverage BERT for Encoder-Decoder Summarization on CNN/Dailymail](https://github.com/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb) | How to warm-start a *EncoderDecoderModel* with a *bert-base-uncased* checkpoint for summarization on CNN/Dailymail | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb)| |[Leverage RoBERTa for Encoder-Decoder Summarization on BBC XSum](https://github.com/patrickvonplaten/notebooks/blob/master/RoBERTaShared_for_BBC_XSum.ipynb) | How to warm-start a shared *EncoderDecoderModel* with a *roberta-base* checkpoint for summarization on BBC/XSum | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/RoBERTaShared_for_BBC_XSum.ipynb)| |[Fine-tune TAPAS on Sequential Question Answering (SQA)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb) | How to fine-tune *TapasForQuestionAnswering* with a *tapas-base* checkpoint on the Sequential Question Answering (SQA) dataset | [Niels Rogge](https://github.com/nielsrogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb)| |[Evaluate TAPAS on Table Fact Checking (TabFact)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Evaluating_TAPAS_on_the_Tabfact_test_set.ipynb) | How to evaluate a fine-tuned *TapasForSequenceClassification* with a *tapas-base-finetuned-tabfact* checkpoint using a combination of the 🤗 datasets and 🤗 transformers libraries | [Niels Rogge](https://github.com/nielsrogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Evaluating_TAPAS_on_the_Tabfact_test_set.ipynb)| |[Fine-tuning mBART for translation](https://colab.research.google.com/github/vasudevgupta7/huggingface-tutorials/blob/main/translation_training.ipynb) | How to fine-tune mBART using Seq2SeqTrainer for Hindi to English translation | [Vasudev Gupta](https://github.com/vasudevgupta7) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/vasudevgupta7/huggingface-tutorials/blob/main/translation_training.ipynb)| |[Fine-tune LayoutLM on FUNSD (a form understanding 
dataset)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb) | How to fine-tune *LayoutLMForTokenClassification* on the FUNSD dataset for information extraction from scanned documents | [Niels Rogge](https://github.com/nielsrogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb)| |[Fine-Tune DistilGPT2 and Generate Text](https://colab.research.google.com/github/tripathiaakash/DistilGPT2-Tutorial/blob/main/distilgpt2_fine_tuning.ipynb) | How to fine-tune DistilGPT2 and generate text | [Aakash Tripathi](https://github.com/tripathiaakash) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tripathiaakash/DistilGPT2-Tutorial/blob/main/distilgpt2_fine_tuning.ipynb)| |[Fine-Tune LED on up to 8K tokens](https://github.com/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb) | How to fine-tune LED on pubmed for long-range summarization | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb)| |[Evaluate LED on Arxiv](https://github.com/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb) | How to effectively evaluate LED on long-range summarization | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb)| |[Fine-tune LayoutLM on RVL-CDIP (a document image classification dataset)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb) | How to fine-tune *LayoutLMForSequenceClassification* on the RVL-CDIP dataset for scanned document classification | [Niels Rogge](https://github.com/nielsrogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb)| |[Wav2Vec2 CTC decoding with GPT2 adjustment](https://github.com/voidful/huggingface_notebook/blob/main/xlsr_gpt.ipynb) | How to decode CTC sequence with language model adjustment | [Eric Lam](https://github.com/voidful) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1e_z5jQHYbO2YKEaUgzb1ww1WwiAyydAj?usp=sharing)| |[Fine-tune BART for summarization in two languages with Trainer class](https://github.com/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb) | How to fine-tune BART for summarization in two languages with Trainer class | [Eliza Szczechla](https://github.com/elsanns) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb)| |[Evaluate Big Bird on Trivia 
QA](https://github.com/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb) | How to evaluate BigBird on long document question answering on Trivia QA | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb)| | [Create video captions using Wav2Vec2](https://github.com/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) | How to create YouTube captions from any video by transcribing the audio with Wav2Vec | [Niklas Muennighoff](https://github.com/Muennighoff) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) | | [Fine-tune the Vision Transformer on CIFAR-10 using PyTorch Lightning](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb) | How to fine-tune the Vision Transformer (ViT) on CIFAR-10 using HuggingFace Transformers, Datasets and PyTorch Lightning | [Niels Rogge](https://github.com/nielsrogge) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb) | | [Fine-tune the Vision Transformer on CIFAR-10 using the 🤗 Trainer](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb) | How to fine-tune the Vision Transformer (ViT) on CIFAR-10 using HuggingFace Transformers, Datasets and the 🤗 Trainer | [Niels Rogge](https://github.com/nielsrogge) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb) | | [Evaluate LUKE on Open Entity, an entity typing dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) | How to evaluate *LukeForEntityClassification* on the Open Entity dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) | | [Evaluate LUKE on TACRED, a relation extraction dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) | How to evaluate *LukeForEntityPairClassification* on the TACRED dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) | | [Evaluate LUKE on CoNLL-2003, an important NER benchmark](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) | How to evaluate *LukeForEntitySpanClassification* on the CoNLL-2003 dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) | | [Evaluate BigBird-Pegasus on PubMed dataset](https://github.com/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) | How to evaluate *BigBirdPegasusForConditionalGeneration* on PubMed dataset | [Vasudev Gupta](https://github.com/vasudevgupta7) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) | | [Speech Emotion Classification with Wav2Vec2](https://github.com/m3hrdadfi/soxan/blob/main/notebooks/Emotion_recognition_in_Greek_speech_using_Wav2Vec2.ipynb) | How to leverage a pretrained Wav2Vec2 model for Emotion Classification on the MEGA dataset | [Mehrdad Farahani](https://github.com/m3hrdadfi) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/m3hrdadfi/soxan/blob/main/notebooks/Emotion_recognition_in_Greek_speech_using_Wav2Vec2.ipynb) | | [Detect objects in an image with DETR](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DETR/DETR_minimal_example_(with_DetrFeatureExtractor).ipynb) | How to use a trained *DetrForObjectDetection* model to detect objects in an image and visualize attention | [Niels Rogge](https://github.com/NielsRogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/DETR/DETR_minimal_example_(with_DetrFeatureExtractor).ipynb) | | [Fine-tune DETR on a custom object detection dataset](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DETR/Fine_tuning_DetrForObjectDetection_on_custom_dataset_(balloon).ipynb) | How to fine-tune *DetrForObjectDetection* on a custom object detection dataset | [Niels Rogge](https://github.com/NielsRogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/DETR/Fine_tuning_DetrForObjectDetection_on_custom_dataset_(balloon).ipynb) | | [Finetune T5 for Named Entity Recognition](https://github.com/ToluClassics/Notebooks/blob/main/T5_Ner_Finetuning.ipynb) | How to fine-tune *T5* on a Named Entity Recognition Task | [Ogundepo Odunayo](https://github.com/ToluClassics) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1obr78FY_cBmWY5ODViCmzdY6O1KB65Vc?usp=sharing) |
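The two DETR entries just above only link out to notebooks; as a quick orientation, a minimal inference sketch in the spirit of the *DetrForObjectDetection* example might look like the following (the `facebook/detr-resnet-50` checkpoint, the sample COCO image URL and the 0.9 confidence cut-off are illustrative assumptions, not something the notebooks prescribe):

```python
import requests
import torch
from PIL import Image

from transformers import DetrFeatureExtractor, DetrForObjectDetection

# A commonly used sample image from COCO; any local image works just as well.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

feature_extractor = DetrFeatureExtractor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

inputs = feature_extractor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# outputs.logits has shape (batch, num_queries, num_classes + 1) and
# outputs.pred_boxes holds normalized (center_x, center_y, width, height) boxes.
probas = outputs.logits.softmax(-1)[0, :, :-1]
keep = probas.max(-1).values > 0.9  # keep only confident, non-"no object" queries
print(f"{int(keep.sum())} objects detected")
```

The balloon fine-tuning notebook builds on the same forward pass, swapping in a custom dataset and a training loop around *DetrForObjectDetection*.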
# Community This page regroups resources around 🤗 Transformers developed by the community. ## Community resources: | Resource | Description | Author | |:----------|:-------------|------:| | [Hugging Face Transformers Glossary Flashcards](https://www.darigovresearch.com/huggingface-transformers-glossary-flashcards) | A set of flashcards based on the [Transformers Docs Glossary](glossary) that has been put into a form which can be easily learnt/revised using [Anki ](https://apps.ankiweb.net/) an open source, cross platform app specifically designed for long term knowledge retention. See this [Introductory video on how to use the flashcards](https://www.youtube.com/watch?v=Dji_h7PILrw). | [Darigov Research](https://www.darigovresearch.com/) | ## Community notebooks: | Notebook | Description | Author | | |:----------|:-------------|:-------------|------:| | [Fine-tune a pre-trained Transformer to generate lyrics](https://github.com/AlekseyKorshuk/huggingartists) | How to generate lyrics in the style of your favorite artist by fine-tuning a GPT-2 model | [Aleksey Korshuk](https://github.com/AlekseyKorshuk) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AlekseyKorshuk/huggingartists/blob/master/huggingartists-demo.ipynb) | | [Train T5 in Tensorflow 2 ](https://github.com/snapthat/TF-T5-text-to-text) | How to train T5 for any task using Tensorflow 2. This notebook demonstrates a Question & Answer task implemented in Tensorflow 2 using SQUAD | [Muhammad Harris](https://github.com/HarrisDePerceptron) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/snapthat/TF-T5-text-to-text/blob/master/snapthatT5/notebooks/TF-T5-Datasets%20Training.ipynb) | | [Train T5 on TPU](https://github.com/patil-suraj/exploring-T5/blob/master/T5_on_TPU.ipynb) | How to train T5 on SQUAD with Transformers and Nlp | [Suraj Patil](https://github.com/patil-suraj) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patil-suraj/exploring-T5/blob/master/T5_on_TPU.ipynb#scrollTo=QLGiFCDqvuil) | | [Fine-tune T5 for Classification and Multiple Choice](https://github.com/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) | How to fine-tune T5 for classification and multiple choice tasks using a text-to-text format with PyTorch Lightning | [Suraj Patil](https://github.com/patil-suraj) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) | | [Fine-tune DialoGPT on New Datasets and Languages](https://github.com/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) | How to fine-tune the DialoGPT model on a new dataset for open-dialog conversational chatbots | [Nathan Cooper](https://github.com/ncoop57) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) | | [Long Sequence Modeling with Reformer](https://github.com/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) | How to train on sequences as long as 500,000 tokens with Reformer | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) | | [Fine-tune BART for Summarization](https://github.com/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb) | How to fine-tune BART for summarization with fastai using blurr | [Wayde Gilliam](https://ohmeow.com/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb) | | [Fine-tune a pre-trained Transformer on anyone's tweets](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) | How to generate tweets in the style of your favorite Twitter account by fine-tuning a GPT-2 model | [Boris Dayma](https://github.com/borisdayma) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) | | [Optimize 🤗 Hugging Face models with Weights & Biases](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) | A complete tutorial showcasing W&B integration with Hugging Face | [Boris Dayma](https://github.com/borisdayma) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) | | [Pretrain Longformer](https://github.com/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) | How to build a "long" version of existing pretrained models | [Iz Beltagy](https://beltagy.net) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) | | [Fine-tune Longformer for QA](https://github.com/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb) | How to fine-tune longformer model for QA task | [Suraj Patil](https://github.com/patil-suraj) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb) | | [Evaluate Model with 🤗nlp](https://github.com/patrickvonplaten/notebooks/blob/master/How_to_evaluate_Longformer_on_TriviaQA_using_NLP.ipynb) | How to evaluate longformer on TriviaQA with `nlp` | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1m7eTGlPmLRgoPkkA7rkhQdZ9ydpmsdLE?usp=sharing) | | [Fine-tune T5 for Sentiment Span Extraction](https://github.com/enzoampil/t5-intro/blob/master/t5_qa_training_pytorch_span_extraction.ipynb) | How to fine-tune T5 for sentiment span extraction using a text-to-text format with PyTorch Lightning | [Lorenzo Ampil](https://github.com/enzoampil) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/enzoampil/t5-intro/blob/master/t5_qa_training_pytorch_span_extraction.ipynb) | | [Fine-tune DistilBert for Multiclass 
Classification](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb) | How to fine-tune DistilBert for multiclass classification with PyTorch | [Abhishek Kumar Mishra](https://github.com/abhimishra91) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb)| |[Fine-tune BERT for Multi-label Classification](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)|How to fine-tune BERT for multi-label classification using PyTorch|[Abhishek Kumar Mishra](https://github.com/abhimishra91) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)| |[Fine-tune T5 for Summarization](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb)|How to fine-tune T5 for summarization in PyTorch and track experiments with WandB|[Abhishek Kumar Mishra](https://github.com/abhimishra91) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb)| |[Speed up Fine-Tuning in Transformers with Dynamic Padding / Bucketing](https://github.com/ELS-RD/transformers-notebook/blob/master/Divide_Hugging_Face_Transformers_training_time_by_2_or_more.ipynb)|How to speed up fine-tuning by a factor of 2 using dynamic padding / bucketing|[Michael Benesty](https://github.com/pommedeterresautee) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1CBfRU1zbfu7-ijiOqAAQUA-RJaxfcJoO?usp=sharing)| |[Pretrain Reformer for Masked Language Modeling](https://github.com/patrickvonplaten/notebooks/blob/master/Reformer_For_Masked_LM.ipynb)| How to train a Reformer model with bi-directional self-attention layers | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1tzzh0i8PgDQGV3SMFUGxM7_gGae3K-uW?usp=sharing)| |[Expand and Fine Tune Sci-BERT](https://github.com/lordtt13/word-embeddings/blob/master/COVID-19%20Research%20Data/COVID-SciBERT.ipynb)| How to increase vocabulary of a pretrained SciBERT model from AllenAI on the CORD dataset and pipeline it. | [Tanmay Thakur](https://github.com/lordtt13) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1rqAR40goxbAfez1xvF3hBJphSCsvXmh8)| |[Fine Tune BlenderBotSmall for Summarization using the Trainer API](https://github.com/lordtt13/transformers-experiments/blob/master/Custom%20Tasks/fine-tune-blenderbot_small-for-summarization.ipynb)| How to fine tune BlenderBotSmall for summarization on a custom dataset, using the Trainer API. 
| [Tanmay Thakur](https://github.com/lordtt13) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/19Wmupuls7mykSGyRN_Qo6lPQhgp56ymq?usp=sharing)| |[Fine-tune Electra and interpret with Integrated Gradients](https://github.com/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb) | How to fine-tune Electra for sentiment analysis and interpret predictions with Captum Integrated Gradients | [Eliza Szczechla](https://elsanns.github.io) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb)| |[fine-tune a non-English GPT-2 Model with Trainer class](https://github.com/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb) | How to fine-tune a non-English GPT-2 Model with Trainer class | [Philipp Schmid](https://www.philschmid.de) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb)| |[Fine-tune a DistilBERT Model for Multi Label Classification task](https://github.com/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb) | How to fine-tune a DistilBERT Model for Multi Label Classification task | [Dhaval Taunk](https://github.com/DhavalTaunk08) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb)| |[Fine-tune ALBERT for sentence-pair classification](https://github.com/NadirEM/nlp-notebooks/blob/master/Fine_tune_ALBERT_sentence_pair_classification.ipynb) | How to fine-tune an ALBERT model or another BERT-based model for the sentence-pair classification task | [Nadir El Manouzi](https://github.com/NadirEM) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NadirEM/nlp-notebooks/blob/master/Fine_tune_ALBERT_sentence_pair_classification.ipynb)| |[Fine-tune Roberta for sentiment analysis](https://github.com/DhavalTaunk08/NLP_scripts/blob/master/sentiment_analysis_using_roberta.ipynb) | How to fine-tune a Roberta model for sentiment analysis | [Dhaval Taunk](https://github.com/DhavalTaunk08) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DhavalTaunk08/NLP_scripts/blob/master/sentiment_analysis_using_roberta.ipynb)| |[Evaluating Question Generation Models](https://github.com/flexudy-pipe/qugeev) | How accurate are the answers to questions generated by your seq2seq transformer model? 
| [Pascal Zoleko](https://github.com/zolekode) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1bpsSqCQU-iw_5nNoRm_crPq6FRuJthq_?usp=sharing)| |[Classify text with DistilBERT and Tensorflow](https://github.com/peterbayerle/huggingface_notebook/blob/main/distilbert_tf.ipynb) | How to fine-tune DistilBERT for text classification in TensorFlow | [Peter Bayerle](https://github.com/peterbayerle) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/peterbayerle/huggingface_notebook/blob/main/distilbert_tf.ipynb)| |[Leverage BERT for Encoder-Decoder Summarization on CNN/Dailymail](https://github.com/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb) | How to warm-start a *EncoderDecoderModel* with a *bert-base-uncased* checkpoint for summarization on CNN/Dailymail | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb)| |[Leverage RoBERTa for Encoder-Decoder Summarization on BBC XSum](https://github.com/patrickvonplaten/notebooks/blob/master/RoBERTaShared_for_BBC_XSum.ipynb) | How to warm-start a shared *EncoderDecoderModel* with a *roberta-base* checkpoint for summarization on BBC/XSum | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/RoBERTaShared_for_BBC_XSum.ipynb)| |[Fine-tune TAPAS on Sequential Question Answering (SQA)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb) | How to fine-tune *TapasForQuestionAnswering* with a *tapas-base* checkpoint on the Sequential Question Answering (SQA) dataset | [Niels Rogge](https://github.com/nielsrogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb)| |[Evaluate TAPAS on Table Fact Checking (TabFact)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Evaluating_TAPAS_on_the_Tabfact_test_set.ipynb) | How to evaluate a fine-tuned *TapasForSequenceClassification* with a *tapas-base-finetuned-tabfact* checkpoint using a combination of the 🤗 datasets and 🤗 transformers libraries | [Niels Rogge](https://github.com/nielsrogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Evaluating_TAPAS_on_the_Tabfact_test_set.ipynb)| |[Fine-tuning mBART for translation](https://colab.research.google.com/github/vasudevgupta7/huggingface-tutorials/blob/main/translation_training.ipynb) | How to fine-tune mBART using Seq2SeqTrainer for Hindi to English translation | [Vasudev Gupta](https://github.com/vasudevgupta7) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/vasudevgupta7/huggingface-tutorials/blob/main/translation_training.ipynb)| |[Fine-tune LayoutLM on FUNSD (a form understanding 
dataset)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb) | How to fine-tune *LayoutLMForTokenClassification* on the FUNSD dataset for information extraction from scanned documents | [Niels Rogge](https://github.com/nielsrogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb)| |[Fine-Tune DistilGPT2 and Generate Text](https://colab.research.google.com/github/tripathiaakash/DistilGPT2-Tutorial/blob/main/distilgpt2_fine_tuning.ipynb) | How to fine-tune DistilGPT2 and generate text | [Aakash Tripathi](https://github.com/tripathiaakash) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tripathiaakash/DistilGPT2-Tutorial/blob/main/distilgpt2_fine_tuning.ipynb)| |[Fine-Tune LED on up to 8K tokens](https://github.com/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb) | How to fine-tune LED on pubmed for long-range summarization | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb)| |[Evaluate LED on Arxiv](https://github.com/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb) | How to effectively evaluate LED on long-range summarization | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb)| |[Fine-tune LayoutLM on RVL-CDIP (a document image classification dataset)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb) | How to fine-tune *LayoutLMForSequenceClassification* on the RVL-CDIP dataset for scanned document classification | [Niels Rogge](https://github.com/nielsrogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb)| |[Wav2Vec2 CTC decoding with GPT2 adjustment](https://github.com/voidful/huggingface_notebook/blob/main/xlsr_gpt.ipynb) | How to decode CTC sequence with language model adjustment | [Eric Lam](https://github.com/voidful) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1e_z5jQHYbO2YKEaUgzb1ww1WwiAyydAj?usp=sharing)| |[Fine-tune BART for summarization in two languages with Trainer class](https://github.com/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb) | How to fine-tune BART for summarization in two languages with Trainer class | [Eliza Szczechla](https://github.com/elsanns) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb)| |[Evaluate Big Bird on Trivia 
QA](https://github.com/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb) | How to evaluate BigBird on long document question answering on Trivia QA | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb)| | [Create video captions using Wav2Vec2](https://github.com/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) | How to create YouTube captions from any video by transcribing the audio with Wav2Vec | [Niklas Muennighoff](https://github.com/Muennighoff) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) | | [Fine-tune the Vision Transformer on CIFAR-10 using PyTorch Lightning](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb) | How to fine-tune the Vision Transformer (ViT) on CIFAR-10 using HuggingFace Transformers, Datasets and PyTorch Lightning | [Niels Rogge](https://github.com/nielsrogge) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb) | | [Fine-tune the Vision Transformer on CIFAR-10 using the 🤗 Trainer](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb) | How to fine-tune the Vision Transformer (ViT) on CIFAR-10 using HuggingFace Transformers, Datasets and the 🤗 Trainer | [Niels Rogge](https://github.com/nielsrogge) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb) | | [Evaluate LUKE on Open Entity, an entity typing dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) | How to evaluate *LukeForEntityClassification* on the Open Entity dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) | | [Evaluate LUKE on TACRED, a relation extraction dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) | How to evaluate *LukeForEntityPairClassification* on the TACRED dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) | | [Evaluate LUKE on CoNLL-2003, an important NER benchmark](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) | How to evaluate *LukeForEntitySpanClassification* on the CoNLL-2003 dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) | | [Evaluate BigBird-Pegasus on PubMed dataset](https://github.com/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) | How to evaluate *BigBirdPegasusForConditionalGeneration* on PubMed dataset | [Vasudev Gupta](https://github.com/vasudevgupta7) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) | | [Speech Emotion Classification with Wav2Vec2](https://github.com/m3hrdadfi/soxan/blob/main/notebooks/Emotion_recognition_in_Greek_speech_using_Wav2Vec2.ipynb) | How to leverage a pretrained Wav2Vec2 model for Emotion Classification on the MEGA dataset | [Mehrdad Farahani](https://github.com/m3hrdadfi) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/m3hrdadfi/soxan/blob/main/notebooks/Emotion_recognition_in_Greek_speech_using_Wav2Vec2.ipynb) | | [Detect objects in an image with DETR](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DETR/DETR_minimal_example_(with_DetrFeatureExtractor).ipynb) | How to use a trained *DetrForObjectDetection* model to detect objects in an image and visualize attention | [Niels Rogge](https://github.com/NielsRogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/DETR/DETR_minimal_example_(with_DetrFeatureExtractor).ipynb) | | [Fine-tune DETR on a custom object detection dataset](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DETR/Fine_tuning_DetrForObjectDetection_on_custom_dataset_(balloon).ipynb) | How to fine-tune *DetrForObjectDetection* on a custom object detection dataset | [Niels Rogge](https://github.com/NielsRogge) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/DETR/Fine_tuning_DetrForObjectDetection_on_custom_dataset_(balloon).ipynb) | | [Finetune T5 for Named Entity Recognition](https://github.com/ToluClassics/Notebooks/blob/main/T5_Ner_Finetuning.ipynb) | How to fine-tune *T5* on a Named Entity Recognition Task | [Ogundepo Odunayo](https://github.com/ToluClassics) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1obr78FY_cBmWY5ODViCmzdY6O1KB65Vc?usp=sharing) |
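Several of the summarization entries in this table (BERT2BERT on CNN/Dailymail, the shared RoBERTa model on BBC/XSum) start by warm-starting an *EncoderDecoderModel*; a minimal sketch of that initial step, assuming the `bert-base-uncased` checkpoint used in the first of those notebooks, could look like this:

```python
from transformers import BertTokenizer, EncoderDecoderModel

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

# Initialize both encoder and decoder from the same pretrained BERT checkpoint;
# the decoder's cross-attention weights are newly initialized and learned during fine-tuning.
model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")

# Generation-related settings a warm-started encoder-decoder needs before training.
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.eos_token_id = tokenizer.sep_token_id
model.config.pad_token_id = tokenizer.pad_token_id
model.config.vocab_size = model.config.encoder.vocab_size
```

From here the warm-started model is fine-tuned like any other seq2seq model; only the randomly initialized cross-attention weights have to be learned from scratch, which is what makes the warm-starting recipe sample-efficient in those notebooks.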
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./examples/tensorflow/token-classification/run_ner.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning a 🤗 Transformers model on token classification tasks (NER, POS, CHUNKS) relying on the accelerate library without using a Trainer. """ import json import logging import os import random from dataclasses import dataclass, field from typing import Optional import datasets import tensorflow as tf from datasets import ClassLabel, load_dataset import evaluate import transformers from transformers import ( CONFIG_MAPPING, AutoConfig, AutoTokenizer, DataCollatorForTokenClassification, HfArgumentParser, PushToHubCallback, TFAutoModelForTokenClassification, TFTrainingArguments, create_optimizer, set_seed, ) from transformers.utils import send_example_telemetry from transformers.utils.versions import require_version logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler()) require_version("datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/token-classification/requirements.txt") # region Command-line arguments @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a csv or JSON file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, ) text_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."} ) label_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."} ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_length: Optional[int] = field(default=256, metadata={"help": "Max length (in tokens) for truncation/padding"}) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) label_all_tokens: bool = field( default=False, metadata={ "help": ( "Whether to put the label for one word on all tokens of generated by that word or just on the " "one (in which case the other tokens will have a padding index)." ) }, ) return_entity_level_metrics: bool = field( default=False, metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." 
self.task_name = self.task_name.lower() # endregion def main(): # region Argument Parsing parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_ner", model_args, data_args, framework="tensorflow") # endregion # region Setup logging # we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO) datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() # If passed along, set the training seed now. if training_args.seed is not None: set_seed(training_args.seed) # endregion # region Loading datasets # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets for token classification task available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'tokens' or the first column if no column called # 'tokens' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.train_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. if raw_datasets["train"] is not None: column_names = raw_datasets["train"].column_names features = raw_datasets["train"].features else: column_names = raw_datasets["validation"].column_names features = raw_datasets["validation"].features if data_args.text_column_name is not None: text_column_name = data_args.text_column_name elif "tokens" in column_names: text_column_name = "tokens" else: text_column_name = column_names[0] if data_args.label_column_name is not None: label_column_name = data_args.label_column_name elif f"{data_args.task_name}_tags" in column_names: label_column_name = f"{data_args.task_name}_tags" else: label_column_name = column_names[1] # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. 
def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(raw_datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # endregion # region Load config and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, num_labels=num_labels) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, num_labels=num_labels) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path if not tokenizer_name_or_path: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if config.model_type in {"gpt2", "roberta"}: tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True, add_prefix_space=True) else: tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True) # endregion # region Preprocessing the raw datasets # First we tokenize all the texts. padding = "max_length" if data_args.pad_to_max_length else False # Tokenize all texts and align the labels with them. def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer( examples[text_column_name], max_length=data_args.max_length, padding=padding, truncation=True, # We use this argument because the texts in our dataset are lists of words (with a label for each word). is_split_into_words=True, ) labels = [] for i, label in enumerate(examples[label_column_name]): word_ids = tokenized_inputs.word_ids(batch_index=i) previous_word_idx = None label_ids = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) # We set the label for the first token of each word. elif word_idx != previous_word_idx: label_ids.append(label_to_id[label[word_idx]]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. 
else: label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs processed_raw_datasets = raw_datasets.map( tokenize_and_align_labels, batched=True, remove_columns=raw_datasets["train"].column_names, desc="Running tokenizer on dataset", ) train_dataset = processed_raw_datasets["train"] eval_dataset = processed_raw_datasets["validation"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # endregion with training_args.strategy.scope(): # region Initialize model if model_args.model_name_or_path: model = TFAutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, config=config, ) else: logger.info("Training new model from scratch") model = TFAutoModelForTokenClassification.from_config(config) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # endregion # region Create TF datasets # We need the DataCollatorForTokenClassification here, as we need to correctly pad labels as # well as inputs. collate_fn = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf") num_replicas = training_args.strategy.num_replicas_in_sync total_train_batch_size = training_args.per_device_train_batch_size * num_replicas dataset_options = tf.data.Options() dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in # training. This is the recommended way to use a Hugging Face dataset when training with Keras. 
You can also # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names # yourself if you use this method, whereas they are automatically inferred from the model input names when # using model.prepare_tf_dataset() # For more info see the docs: # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset tf_train_dataset = model.prepare_tf_dataset( train_dataset, collate_fn=collate_fn, batch_size=total_train_batch_size, shuffle=True, ).with_options(dataset_options) total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas tf_eval_dataset = model.prepare_tf_dataset( eval_dataset, collate_fn=collate_fn, batch_size=total_eval_batch_size, shuffle=False, ).with_options(dataset_options) # endregion # region Optimizer, loss and compilation num_train_steps = int(len(tf_train_dataset) * training_args.num_train_epochs) if training_args.warmup_steps > 0: num_warmup_steps = training_args.warmup_steps elif training_args.warmup_ratio > 0: num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) else: num_warmup_steps = 0 optimizer, lr_schedule = create_optimizer( init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm, ) model.compile(optimizer=optimizer, jit_compile=training_args.xla) # endregion # Metrics metric = evaluate.load("seqeval") def get_labels(y_pred, y_true): # Transform predictions and references tensos to numpy arrays # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(pred, gold_label) if l != -100] for pred, gold_label in zip(y_pred, y_true) ] true_labels = [ [label_list[l] for (p, l) in zip(pred, gold_label) if l != -100] for pred, gold_label in zip(y_pred, y_true) ] return true_predictions, true_labels def compute_metrics(): results = metric.compute() if data_args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # endregion # region Preparing push_to_hub and model card push_to_hub_model_id = training_args.push_to_hub_model_id model_name = model_args.model_name_or_path.split("/")[-1] if not push_to_hub_model_id: if data_args.dataset_name is not None: push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}" else: push_to_hub_model_id = f"{model_name}-finetuned-token-classification" model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"} if data_args.dataset_name is not None: model_card_kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: model_card_kwargs["dataset_args"] = data_args.dataset_config_name model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: model_card_kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: 
callbacks = [ PushToHubCallback( output_dir=training_args.output_dir, model_id=push_to_hub_model_id, organization=training_args.push_to_hub_organization, token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs, ) ] else: callbacks = [] # endregion # region Training logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {training_args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size = {total_train_batch_size}") # Only show the progress bar once on each machine. model.fit( tf_train_dataset, validation_data=tf_eval_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks, ) # endregion # region Predictions # If you have variable batch sizes (i.e. not using pad_to_max_length), then # this bit might fail on TF < 2.8 because TF can't concatenate outputs of varying seq # length from predict(). try: predictions = model.predict(tf_eval_dataset, batch_size=training_args.per_device_eval_batch_size)["logits"] except tf.python.framework.errors_impl.InvalidArgumentError: raise ValueError( "Concatenating predictions failed! If your version of TensorFlow is 2.8.0 or older " "then you will need to use --pad_to_max_length to generate predictions, as older " "versions of TensorFlow cannot concatenate variable-length predictions as RaggedTensor." ) if isinstance(predictions, tf.RaggedTensor): predictions = predictions.to_tensor(default_value=-100) predictions = tf.math.argmax(predictions, axis=-1).numpy() if "label" in eval_dataset: labels = eval_dataset.with_format("tf")["label"] else: labels = eval_dataset.with_format("tf")["labels"] if isinstance(labels, tf.RaggedTensor): labels = labels.to_tensor(default_value=-100) labels = labels.numpy() attention_mask = eval_dataset.with_format("tf")["attention_mask"] if isinstance(attention_mask, tf.RaggedTensor): attention_mask = attention_mask.to_tensor(default_value=-100) attention_mask = attention_mask.numpy() labels[attention_mask == 0] = -100 preds, refs = get_labels(predictions, labels) metric.add_batch( predictions=preds, references=refs, ) eval_metric = compute_metrics() logger.info("Evaluation metrics:") for key, val in eval_metric.items(): logger.info(f"{key}: {val:.4f}") if training_args.output_dir is not None: output_eval_file = os.path.join(training_args.output_dir, "all_results.json") with open(output_eval_file, "w") as writer: writer.write(json.dumps(eval_metric)) # endregion if training_args.output_dir is not None and not training_args.push_to_hub: # If we're not pushing to hub, at least save a local copy when we're done model.save_pretrained(training_args.output_dir) if __name__ == "__main__": main()
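The core preprocessing trick in the script above is aligning word-level labels with sub-word tokens through `word_ids()`, marking everything the loss should ignore with -100. Pulled out of the script, a self-contained sketch of that step (the `bert-base-cased` checkpoint and the integer tags are made up purely for illustration) behaves like this:

```python
from transformers import AutoTokenizer

# Requires a fast tokenizer so that word_ids() is available.
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

words = ["HuggingFace", "is", "based", "in", "New", "York"]
word_labels = [3, 0, 0, 0, 5, 6]  # hypothetical integer tags, one per word

encoding = tokenizer(words, is_split_into_words=True, truncation=True)

label_ids, previous_word_idx = [], None
for word_idx in encoding.word_ids():
    if word_idx is None:
        label_ids.append(-100)                   # special tokens: ignored by the loss
    elif word_idx != previous_word_idx:
        label_ids.append(word_labels[word_idx])  # first sub-token of each word keeps the label
    else:
        label_ids.append(-100)                   # later sub-tokens: mirrors label_all_tokens=False
    previous_word_idx = word_idx

print(list(zip(encoding.tokens(), label_ids)))
```

Running it shows [CLS], [SEP] and the continuation pieces of split words paired with -100, which is what `tokenize_and_align_labels` produces when `label_all_tokens` is left at its default of False.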
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning a 🤗 Transformers model on token classification tasks (NER, POS, CHUNKS) relying on the accelerate library without using a Trainer. """ import json import logging import os import random from dataclasses import dataclass, field from typing import Optional import datasets import tensorflow as tf from datasets import ClassLabel, load_dataset import evaluate import transformers from transformers import ( CONFIG_MAPPING, AutoConfig, AutoTokenizer, DataCollatorForTokenClassification, HfArgumentParser, PushToHubCallback, TFAutoModelForTokenClassification, TFTrainingArguments, create_optimizer, set_seed, ) from transformers.utils import send_example_telemetry from transformers.utils.versions import require_version logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler()) require_version("datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/token-classification/requirements.txt") # region Command-line arguments @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a csv or JSON file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, ) text_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."} ) label_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."} ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_length: Optional[int] = field(default=256, metadata={"help": "Max length (in tokens) for truncation/padding"}) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) label_all_tokens: bool = field( default=False, metadata={ "help": ( "Whether to put the label for one word on all tokens of generated by that word or just on the " "one (in which case the other tokens will have a padding index)." ) }, ) return_entity_level_metrics: bool = field( default=False, metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." 
self.task_name = self.task_name.lower() # endregion def main(): # region Argument Parsing parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_ner", model_args, data_args, framework="tensorflow") # endregion # region Setup logging # we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO) datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() # If passed along, set the training seed now. if training_args.seed is not None: set_seed(training_args.seed) # endregion # region Loading datasets # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets for token classification task available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'tokens' or the first column if no column called # 'tokens' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.train_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. if raw_datasets["train"] is not None: column_names = raw_datasets["train"].column_names features = raw_datasets["train"].features else: column_names = raw_datasets["validation"].column_names features = raw_datasets["validation"].features if data_args.text_column_name is not None: text_column_name = data_args.text_column_name elif "tokens" in column_names: text_column_name = "tokens" else: text_column_name = column_names[0] if data_args.label_column_name is not None: label_column_name = data_args.label_column_name elif f"{data_args.task_name}_tags" in column_names: label_column_name = f"{data_args.task_name}_tags" else: label_column_name = column_names[1] # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. 
def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(raw_datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # endregion # region Load config and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, num_labels=num_labels) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, num_labels=num_labels) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path if not tokenizer_name_or_path: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if config.model_type in {"gpt2", "roberta"}: tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True, add_prefix_space=True) else: tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True) # endregion # region Preprocessing the raw datasets # First we tokenize all the texts. padding = "max_length" if data_args.pad_to_max_length else False # Tokenize all texts and align the labels with them. def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer( examples[text_column_name], max_length=data_args.max_length, padding=padding, truncation=True, # We use this argument because the texts in our dataset are lists of words (with a label for each word). is_split_into_words=True, ) labels = [] for i, label in enumerate(examples[label_column_name]): word_ids = tokenized_inputs.word_ids(batch_index=i) previous_word_idx = None label_ids = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) # We set the label for the first token of each word. elif word_idx != previous_word_idx: label_ids.append(label_to_id[label[word_idx]]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. 
else: label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs processed_raw_datasets = raw_datasets.map( tokenize_and_align_labels, batched=True, remove_columns=raw_datasets["train"].column_names, desc="Running tokenizer on dataset", ) train_dataset = processed_raw_datasets["train"] eval_dataset = processed_raw_datasets["validation"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # endregion with training_args.strategy.scope(): # region Initialize model if model_args.model_name_or_path: model = TFAutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, config=config, ) else: logger.info("Training new model from scratch") model = TFAutoModelForTokenClassification.from_config(config) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # endregion # region Create TF datasets # We need the DataCollatorForTokenClassification here, as we need to correctly pad labels as # well as inputs. collate_fn = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf") num_replicas = training_args.strategy.num_replicas_in_sync total_train_batch_size = training_args.per_device_train_batch_size * num_replicas dataset_options = tf.data.Options() dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in # training. This is the recommended way to use a Hugging Face dataset when training with Keras. 
You can also # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names # yourself if you use this method, whereas they are automatically inferred from the model input names when # using model.prepare_tf_dataset() # For more info see the docs: # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset tf_train_dataset = model.prepare_tf_dataset( train_dataset, collate_fn=collate_fn, batch_size=total_train_batch_size, shuffle=True, ).with_options(dataset_options) total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas tf_eval_dataset = model.prepare_tf_dataset( eval_dataset, collate_fn=collate_fn, batch_size=total_eval_batch_size, shuffle=False, ).with_options(dataset_options) # endregion # region Optimizer, loss and compilation num_train_steps = int(len(tf_train_dataset) * training_args.num_train_epochs) if training_args.warmup_steps > 0: num_warmup_steps = training_args.warmup_steps elif training_args.warmup_ratio > 0: num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) else: num_warmup_steps = 0 optimizer, lr_schedule = create_optimizer( init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm, ) model.compile(optimizer=optimizer, jit_compile=training_args.xla) # endregion # Metrics metric = evaluate.load("seqeval") def get_labels(y_pred, y_true): # Transform predictions and references tensos to numpy arrays # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(pred, gold_label) if l != -100] for pred, gold_label in zip(y_pred, y_true) ] true_labels = [ [label_list[l] for (p, l) in zip(pred, gold_label) if l != -100] for pred, gold_label in zip(y_pred, y_true) ] return true_predictions, true_labels def compute_metrics(): results = metric.compute() if data_args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # endregion # region Preparing push_to_hub and model card push_to_hub_model_id = training_args.push_to_hub_model_id model_name = model_args.model_name_or_path.split("/")[-1] if not push_to_hub_model_id: if data_args.dataset_name is not None: push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}" else: push_to_hub_model_id = f"{model_name}-finetuned-token-classification" model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"} if data_args.dataset_name is not None: model_card_kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: model_card_kwargs["dataset_args"] = data_args.dataset_config_name model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: model_card_kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: 
callbacks = [ PushToHubCallback( output_dir=training_args.output_dir, model_id=push_to_hub_model_id, organization=training_args.push_to_hub_organization, token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs, ) ] else: callbacks = [] # endregion # region Training logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {training_args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size = {total_train_batch_size}") # Only show the progress bar once on each machine. model.fit( tf_train_dataset, validation_data=tf_eval_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks, ) # endregion # region Predictions # If you have variable batch sizes (i.e. not using pad_to_max_length), then # this bit might fail on TF < 2.8 because TF can't concatenate outputs of varying seq # length from predict(). try: predictions = model.predict(tf_eval_dataset, batch_size=training_args.per_device_eval_batch_size)["logits"] except tf.python.framework.errors_impl.InvalidArgumentError: raise ValueError( "Concatenating predictions failed! If your version of TensorFlow is 2.8.0 or older " "then you will need to use --pad_to_max_length to generate predictions, as older " "versions of TensorFlow cannot concatenate variable-length predictions as RaggedTensor." ) if isinstance(predictions, tf.RaggedTensor): predictions = predictions.to_tensor(default_value=-100) predictions = tf.math.argmax(predictions, axis=-1).numpy() if "label" in eval_dataset: labels = eval_dataset.with_format("tf")["label"] else: labels = eval_dataset.with_format("tf")["labels"] if isinstance(labels, tf.RaggedTensor): labels = labels.to_tensor(default_value=-100) labels = labels.numpy() attention_mask = eval_dataset.with_format("tf")["attention_mask"] if isinstance(attention_mask, tf.RaggedTensor): attention_mask = attention_mask.to_tensor(default_value=-100) attention_mask = attention_mask.numpy() labels[attention_mask == 0] = -100 preds, refs = get_labels(predictions, labels) metric.add_batch( predictions=preds, references=refs, ) eval_metric = compute_metrics() logger.info("Evaluation metrics:") for key, val in eval_metric.items(): logger.info(f"{key}: {val:.4f}") if training_args.output_dir is not None: output_eval_file = os.path.join(training_args.output_dir, "all_results.json") with open(output_eval_file, "w") as writer: writer.write(json.dumps(eval_metric)) # endregion if training_args.output_dir is not None and not training_args.push_to_hub: # If we're not pushing to hub, at least save a local copy when we're done model.save_pretrained(training_args.output_dir) if __name__ == "__main__": main()
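The comments inside tokenize_and_align_labels above describe how word-level NER labels are propagated to sub-word tokens: special tokens get -100, the first sub-token of each word carries the word's label, and the remaining sub-tokens get the label or -100 depending on label_all_tokens. As a standalone illustration of that alignment, here is a minimal sketch; the checkpoint name, sentence, and label ids are illustrative assumptions, not values taken from the script.

# Minimal sketch (assumptions: checkpoint name, toy words and label ids) of the
# word_ids()-based label alignment performed by tokenize_and_align_labels above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")  # assumed checkpoint

words = ["Hugging", "Face", "is", "based", "in", "NYC"]  # pre-split words, hence is_split_into_words=True
word_labels = [3, 4, 0, 0, 0, 5]                         # one label id per word (toy values)

encoding = tokenizer(words, is_split_into_words=True, truncation=True)

label_all_tokens = False
aligned_labels, previous_word_idx = [], None
for word_idx in encoding.word_ids(batch_index=0):
    if word_idx is None:                    # special tokens ([CLS], [SEP]) are ignored by the loss
        aligned_labels.append(-100)
    elif word_idx != previous_word_idx:     # first sub-token of each word carries the word's label
        aligned_labels.append(word_labels[word_idx])
    else:                                   # remaining sub-tokens: same label or -100
        aligned_labels.append(word_labels[word_idx] if label_all_tokens else -100)
    previous_word_idx = word_idx

print(list(zip(encoding.tokens(), aligned_labels)))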
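Similarly, the get_labels()/compute_metrics() pair above strips the -100 positions and maps id sequences to label strings before handing them to seqeval. A minimal sketch of that conversion, computed in one metric.compute() call here rather than via add_batch(); the label list, predictions, and references below are made-up toy values for illustration.

# Minimal sketch (toy values only) of converting padded id predictions into seqeval inputs.
import evaluate

metric = evaluate.load("seqeval")

label_list = ["O", "B-PER", "I-PER", "B-LOC", "I-LOC"]  # toy label names
y_pred = [[-100, 1, 2, 0, 3, -100]]                     # per-token argmax ids (toy values)
y_true = [[-100, 1, 2, 0, 3, -100]]                     # gold ids; -100 marks ignored positions

true_predictions = [
    [label_list[p] for (p, l) in zip(pred, gold) if l != -100]
    for pred, gold in zip(y_pred, y_true)
]
true_labels = [
    [label_list[l] for (p, l) in zip(pred, gold) if l != -100]
    for pred, gold in zip(y_pred, y_true)
]

results = metric.compute(predictions=true_predictions, references=true_labels)
print(results["overall_precision"], results["overall_recall"], results["overall_f1"])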
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./.github/ISSUE_TEMPLATE/migration.yml
name: "\U0001F4DA Migration from pytorch-pretrained-bert or pytorch-transformers" description: Report a problem when migrating from pytorch-pretrained-bert or pytorch-transformers to transformers labels: [ "migration" ] body: - type: textarea id: system-info attributes: label: System Info description: Please share your system info with us. You can run the command `transformers-cli env` and copy-paste its output below. render: shell placeholder: transformers version, platform, python version, ... validations: required: true - type: checkboxes id: information-scripts-examples attributes: label: Information description: 'The problem arises when using:' options: - label: "The official example scripts" - label: "My own modified scripts" - type: checkboxes id: information-tasks attributes: label: Tasks description: "The tasks I am working on are:" options: - label: "An officially supported task in the `examples` folder (such as GLUE/SQuAD, ...)" - label: "My own task or dataset (give details below)" - type: textarea id: reproduction validations: required: true attributes: label: Reproduction description: | Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet. If you have code snippets, error messages, stack traces please provide them here as well. Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting Do not use screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code. placeholder: | Steps to reproduce the behavior: 1. 2. 3. - type: textarea id: expected-behavior validations: required: true attributes: label: Expected behavior description: "A clear and concise description of what you would expect to happen." render: shell - type: checkboxes id: checklist attributes: label: Checklist options: - label: "I have read the migration guide in the readme. ([pytorch-transformers](https://github.com/huggingface/transformers#migrating-from-pytorch-transformers-to-transformers); [pytorch-pretrained-bert](https://github.com/huggingface/transformers#migrating-from-pytorch-pretrained-bert-to-transformers))" required: true - label: "I checked if a related official extension example runs on my machine." required: true
name: "\U0001F4DA Migration from pytorch-pretrained-bert or pytorch-transformers" description: Report a problem when migrating from pytorch-pretrained-bert or pytorch-transformers to transformers labels: [ "migration" ] body: - type: textarea id: system-info attributes: label: System Info description: Please share your system info with us. You can run the command `transformers-cli env` and copy-paste its output below. render: shell placeholder: transformers version, platform, python version, ... validations: required: true - type: checkboxes id: information-scripts-examples attributes: label: Information description: 'The problem arises when using:' options: - label: "The official example scripts" - label: "My own modified scripts" - type: checkboxes id: information-tasks attributes: label: Tasks description: "The tasks I am working on are:" options: - label: "An officially supported task in the `examples` folder (such as GLUE/SQuAD, ...)" - label: "My own task or dataset (give details below)" - type: textarea id: reproduction validations: required: true attributes: label: Reproduction description: | Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet. If you have code snippets, error messages, stack traces please provide them here as well. Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting Do not use screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code. placeholder: | Steps to reproduce the behavior: 1. 2. 3. - type: textarea id: expected-behavior validations: required: true attributes: label: Expected behavior description: "A clear and concise description of what you would expect to happen." render: shell - type: checkboxes id: checklist attributes: label: Checklist options: - label: "I have read the migration guide in the readme. ([pytorch-transformers](https://github.com/huggingface/transformers#migrating-from-pytorch-transformers-to-transformers); [pytorch-pretrained-bert](https://github.com/huggingface/transformers#migrating-from-pytorch-pretrained-bert-to-transformers))" required: true - label: "I checked if a related official extension example runs on my machine." required: true
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/pipelines/test_pipelines_object_detection.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import nested_simplify, require_tf, require_timm, require_torch, require_vision, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @require_vision @require_timm @require_torch class ObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, feature_extractor): object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def run_pipeline_test(self, object_detector, examples): outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0) self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) import datasets dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") batch = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] batch_outputs = object_detector(batch, threshold=0.0) self.assertEqual(len(batch), len(batch_outputs)) for outputs in batch_outputs: self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) @require_tf @unittest.skip("Object detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], threshold=0.0, ) self.assertEqual( 
nested_simplify(outputs, decimals=4), [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ], ) @require_torch @slow def test_large_model_pt(self): model_id = "facebook/detr-resnet-50" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_integration_torch_object_detection(self): model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( 
nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_threshold(self): threshold = 0.9985 model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) @require_torch @slow def test_layoutlm(self): model_id = "philschmid/layoutlm-funsd" threshold = 0.998 object_detector = pipeline("object-detection", model=model_id, threshold=threshold) outputs = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9982, "label": "B-QUESTION", "box": {"xmin": 654, "ymin": 165, "xmax": 719, "ymax": 719}, }, { "score": 0.9982, "label": "I-QUESTION", "box": {"xmin": 691, "ymin": 202, "xmax": 735, "ymax": 735}, }, ], )
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import nested_simplify, require_tf, require_timm, require_torch, require_vision, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @require_vision @require_timm @require_torch class ObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, feature_extractor): object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def run_pipeline_test(self, object_detector, examples): outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0) self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) import datasets dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") batch = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] batch_outputs = object_detector(batch, threshold=0.0) self.assertEqual(len(batch), len(batch_outputs)) for outputs in batch_outputs: self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) @require_tf @unittest.skip("Object detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], threshold=0.0, ) self.assertEqual( 
nested_simplify(outputs, decimals=4), [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ], ) @require_torch @slow def test_large_model_pt(self): model_id = "facebook/detr-resnet-50" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_integration_torch_object_detection(self): model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( 
nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_threshold(self): threshold = 0.9985 model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) @require_torch @slow def test_layoutlm(self): model_id = "philschmid/layoutlm-funsd" threshold = 0.998 object_detector = pipeline("object-detection", model=model_id, threshold=threshold) outputs = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9982, "label": "B-QUESTION", "box": {"xmin": 654, "ymin": 165, "xmax": 719, "ymax": 719}, }, { "score": 0.9982, "label": "I-QUESTION", "box": {"xmin": 691, "ymin": 202, "xmax": 735, "ymax": 735}, }, ], )
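The tests above exercise ObjectDetectionPipeline end to end and document its output format (a list of dicts with score, label, and an xmin/ymin/xmax/ymax box). As a quick usage sketch: the model id and image URL below are the ones used in the tests themselves, while the threshold value is an arbitrary illustrative choice.

# Minimal usage sketch of the pipeline exercised by the tests above.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
detections = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    threshold=0.9,  # keep only confident boxes; illustrative value
)
for d in detections:
    print(d["label"], round(d["score"], 4), d["box"])  # box = {"xmin", "ymin", "xmax", "ymax"}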
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./examples/research_projects/mlm_wwm/run_mlm_wwm.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=fill-mask """ # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) def __post_init__(self): if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def add_chinese_references(dataset, ref_file): with open(ref_file, "r", encoding="utf-8") as f: refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] assert len(dataset) == len(refs) dataset_dict = {c: dataset[c] for c in dataset.column_names} dataset_dict["chinese_ref"] = refs return Dataset.from_dict(dataset_dict) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) if "validation" not in datasets.keys(): datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.train_file.split(".")[-1] if extension == "txt": extension = "text" datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. 
# Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: model = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) else: logger.info("Training new model from scratch") model = AutoModelForMaskedLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = datasets["train"].column_names else: column_names = datasets["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] padding = "max_length" if data_args.pad_to_max_length else False def tokenize_function(examples): # Remove empty lines examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()] return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length) tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) # Add the chinese references if provided if data_args.train_ref_file is not None: tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file) if data_args.validation_ref_file is not None: tokenized_datasets["validation"] = add_chinese_references( tokenized_datasets["validation"], data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer has_ref = data_args.train_ref_file or data_args.validation_ref_file if has_ref: training_args.remove_unused_columns = False # Data collator # This one will take care of randomly masking the tokens. 
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: if last_checkpoint is not None: checkpoint = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload output_train_file = os.path.join(training_args.output_dir, "train_results.txt") if trainer.is_world_process_zero(): with open(output_train_file, "w") as writer: logger.info("***** Train results *****") for key, value in sorted(train_result.metrics.items()): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json")) # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") eval_output = trainer.evaluate() perplexity = math.exp(eval_output["eval_loss"]) results["perplexity"] = perplexity output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt") if trainer.is_world_process_zero(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key, value in sorted(results.items()): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") return results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
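run_mlm_wwm.py above hands DataCollatorForWholeWordMask to the Trainer, and for Chinese text it additionally attaches a chinese_ref list per example so the collator can see word boundaries. A minimal sketch of what the collator produces for a single English example, assuming a BERT-style WordPiece tokenizer (the collator relies on the "##" sub-word prefix); the checkpoint name and sentence are illustrative assumptions, not values from the script.

# Minimal sketch (assumptions: checkpoint name, example sentence) of whole-word masking.
from transformers import AutoTokenizer, DataCollatorForWholeWordMask

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed BERT-style tokenizer
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)

features = [tokenizer("Whole word masking replaces every sub-token of a chosen word.")]
batch = collator(features)

# Masked positions cover whole words; every other position gets label -100 and is ignored by the loss.
print(tokenizer.convert_ids_to_tokens(batch["input_ids"][0].tolist()))
print(batch["labels"][0])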
# coding=utf-8 # Copyright 2020 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=fill-mask """ # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) def __post_init__(self): if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def add_chinese_references(dataset, ref_file): with open(ref_file, "r", encoding="utf-8") as f: refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] assert len(dataset) == len(refs) dataset_dict = {c: dataset[c] for c in dataset.column_names} dataset_dict["chinese_ref"] = refs return Dataset.from_dict(dataset_dict) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) if "validation" not in datasets.keys(): datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.train_file.split(".")[-1] if extension == "txt": extension = "text" datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. 
# Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: model = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) else: logger.info("Training new model from scratch") model = AutoModelForMaskedLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = datasets["train"].column_names else: column_names = datasets["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] padding = "max_length" if data_args.pad_to_max_length else False def tokenize_function(examples): # Remove empty lines examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()] return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length) tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) # Add the chinese references if provided if data_args.train_ref_file is not None: tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file) if data_args.validation_ref_file is not None: tokenized_datasets["validation"] = add_chinese_references( tokenized_datasets["validation"], data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer has_ref = data_args.train_ref_file or data_args.validation_ref_file if has_ref: training_args.remove_unused_columns = False # Data collator # This one will take care of randomly masking the tokens. 
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: if last_checkpoint is not None: checkpoint = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload output_train_file = os.path.join(training_args.output_dir, "train_results.txt") if trainer.is_world_process_zero(): with open(output_train_file, "w") as writer: logger.info("***** Train results *****") for key, value in sorted(train_result.metrics.items()): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json")) # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") eval_output = trainer.evaluate() perplexity = math.exp(eval_output["eval_loss"]) results["perplexity"] = perplexity output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt") if trainer.is_world_process_zero(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key, value in sorted(results.items()): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") return results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
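As the argument-parsing branch above shows, this script can also be driven by a single JSON file (`parser.parse_json_file` is used when the only command-line argument ends in `.json`). Below is a minimal sketch of preparing such a file; the file name `args.json`, the model name, and all data paths are illustrative assumptions, not values taken from this record.

```python
# Sketch: write a JSON argument file that run_mlm_wwm.py can consume when
# invoked as `python run_mlm_wwm.py args.json`. Field names come from the
# ModelArguments / DataTrainingArguments / TrainingArguments dataclasses above;
# the concrete values and paths are assumptions for illustration only.
import json

args = {
    # ModelArguments
    "model_name_or_path": "bert-base-chinese",
    # DataTrainingArguments
    "train_file": "data/train.txt",
    "validation_file": "data/valid.txt",
    "train_ref_file": "data/train_ref.txt",
    "max_seq_length": 128,
    # TrainingArguments
    "output_dir": "output/mlm-wwm",
    "do_train": True,
    "do_eval": True,
}

with open("args.json", "w", encoding="utf-8") as f:
    json.dump(args, f, indent=2)
```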
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/detr/configuration_detr.py
# coding=utf-8 # Copyright 2021 Facebook AI Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ DETR model configuration""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json", # See all DETR models at https://huggingface.co/models?filter=detr } class DetrConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DetrModel`]. It is used to instantiate a DETR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DETR [facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_queries (`int`, *optional*, defaults to 100): Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetrModel`] can detect in a single image. For COCO, we recommend 100 queries. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. backbone (`str`, *optional*, defaults to `"resnet50"`): Name of convolutional backbone to use. Supports any convolutional backbone from the timm package. For a list of all available models, see [this page](https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-model). use_pretrained_backbone (`bool`, *optional*, defaults to `True`): Whether to use pretrained weights for the backbone. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. mask_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the Focal loss in the panoptic segmentation loss. dice_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss. 
Examples: ```python >>> from transformers import DetrConfig, DetrModel >>> # Initializing a DETR facebook/detr-resnet-50 style configuration >>> configuration = DetrConfig() >>> # Initializing a model (with random weights) from the facebook/detr-resnet-50 style configuration >>> model = DetrModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "detr" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, num_channels=3, num_queries=100, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, classifier_dropout=0.0, scale_embedding=False, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs ): self.num_channels = num_channels self.num_queries = num_queries self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.init_xavier_std = init_xavier_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.dilation = dilation # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.mask_loss_coefficient = mask_loss_coefficient self.dice_loss_coefficient = dice_loss_coefficient self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.eos_coefficient = eos_coefficient super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model class DetrOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def atol_for_validation(self) -> float: return 1e-5 @property def default_onnx_opset(self) -> int: return 12
# coding=utf-8 # Copyright 2021 Facebook AI Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ DETR model configuration""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json", # See all DETR models at https://huggingface.co/models?filter=detr } class DetrConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DetrModel`]. It is used to instantiate a DETR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DETR [facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_queries (`int`, *optional*, defaults to 100): Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetrModel`] can detect in a single image. For COCO, we recommend 100 queries. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. backbone (`str`, *optional*, defaults to `"resnet50"`): Name of convolutional backbone to use. Supports any convolutional backbone from the timm package. For a list of all available models, see [this page](https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-model). use_pretrained_backbone (`bool`, *optional*, defaults to `True`): Whether to use pretrained weights for the backbone. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. mask_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the Focal loss in the panoptic segmentation loss. dice_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss. 
Examples: ```python >>> from transformers import DetrConfig, DetrModel >>> # Initializing a DETR facebook/detr-resnet-50 style configuration >>> configuration = DetrConfig() >>> # Initializing a model (with random weights) from the facebook/detr-resnet-50 style configuration >>> model = DetrModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "detr" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, num_channels=3, num_queries=100, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, classifier_dropout=0.0, scale_embedding=False, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs ): self.num_channels = num_channels self.num_queries = num_queries self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.init_xavier_std = init_xavier_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.dilation = dilation # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.mask_loss_coefficient = mask_loss_coefficient self.dice_loss_coefficient = dice_loss_coefficient self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.eos_coefficient = eos_coefficient super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model class DetrOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def atol_for_validation(self) -> float: return 1e-5 @property def default_onnx_opset(self) -> int: return 12
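As a small illustration of the `attribute_map` and the two properties defined at the end of `DetrConfig`, the generic `hidden_size` / `num_attention_heads` names resolve to the DETR-specific fields. The sketch below only exercises code shown in this record and assumes `transformers` is installed.

```python
from transformers import DetrConfig

# Instantiate with explicit values for the two fields we want to inspect.
config = DetrConfig(d_model=256, encoder_attention_heads=8)

# The attribute_map / properties expose DETR-specific fields under generic names.
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8
```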
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./docker/transformers-pytorch-tpu/docker-entrypoint.sh
#!/bin/bash
source ~/.bashrc
echo "running docker-entrypoint.sh"
conda activate container
echo $KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS
echo "printed TPU info"
export XRT_TPU_CONFIG="tpu_worker;0;${KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS:7}"
exec "$@"
#!/bin/bash
source ~/.bashrc
echo "running docker-entrypoint.sh"
conda activate container
echo $KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS
echo "printed TPU info"
export XRT_TPU_CONFIG="tpu_worker;0;${KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS:7}"
exec "$@"
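The only non-obvious part of this entrypoint is the `${KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS:7}` expansion, which drops the first seven characters of the endpoint string. Assuming the variable looks like `grpc://10.0.0.2:8470` (a typical GKE TPU endpoint value, not one taken from this record), that strips the `grpc://` scheme prefix. A small Python sketch of the same transformation:

```python
import os

# Assumed example value; on GKE the endpoint is typically "grpc://<ip>:8470".
endpoint = os.environ.get("KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS", "grpc://10.0.0.2:8470")

# Equivalent of the bash expansion ${KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS:7}:
# drop the first 7 characters, i.e. the "grpc://" prefix.
xrt_tpu_config = f"tpu_worker;0;{endpoint[7:]}"
print(xrt_tpu_config)  # -> tpu_worker;0;10.0.0.2:8470
```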
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./tests/repo_utils/test_check_dummies.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers") DUMMY_CONSTANT = """ {0} = None """ DUMMY_CLASS = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ DUMMY_FUNCTION = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ class CheckDummiesTester(unittest.TestCase): def test_find_backend(self): no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")') self.assertIsNone(no_backend) simple_backend = find_backend(" if not is_tokenizers_available():") self.assertEqual(simple_backend, "tokenizers") backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") self.assertEqual(backend_with_underscore, "tensorflow_text") double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):") self.assertEqual(double_backend, "sentencepiece_and_tokenizers") double_backend_with_underscore = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") triple_backend = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision") def test_read_init(self): objects = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch", objects) self.assertIn("tensorflow_text", objects) self.assertIn("sentencepiece_and_tokenizers", objects) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel", objects["torch"]) self.assertIn("TFBertModel", objects["tf"]) self.assertIn("FlaxBertModel", objects["flax"]) self.assertIn("BertModel", objects["torch"]) self.assertIn("TFBertTokenizer", objects["tensorflow_text"]) self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"]) def test_create_dummy_object(self): dummy_constant = create_dummy_object("CONSTANT", "'torch'") self.assertEqual(dummy_constant, "\nCONSTANT = None\n") dummy_function = create_dummy_object("function", "'torch'") self.assertEqual( dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) expected_dummy_class = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') """ dummy_class = create_dummy_object("FakeClass", 
"'torch'") self.assertEqual(dummy_class, expected_dummy_class) def test_create_dummy_files(self): expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit. # flake8: noqa from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) """ dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers") DUMMY_CONSTANT = """ {0} = None """ DUMMY_CLASS = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ DUMMY_FUNCTION = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ class CheckDummiesTester(unittest.TestCase): def test_find_backend(self): no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")') self.assertIsNone(no_backend) simple_backend = find_backend(" if not is_tokenizers_available():") self.assertEqual(simple_backend, "tokenizers") backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") self.assertEqual(backend_with_underscore, "tensorflow_text") double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):") self.assertEqual(double_backend, "sentencepiece_and_tokenizers") double_backend_with_underscore = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") triple_backend = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision") def test_read_init(self): objects = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch", objects) self.assertIn("tensorflow_text", objects) self.assertIn("sentencepiece_and_tokenizers", objects) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel", objects["torch"]) self.assertIn("TFBertModel", objects["tf"]) self.assertIn("FlaxBertModel", objects["flax"]) self.assertIn("BertModel", objects["torch"]) self.assertIn("TFBertTokenizer", objects["tensorflow_text"]) self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"]) def test_create_dummy_object(self): dummy_constant = create_dummy_object("CONSTANT", "'torch'") self.assertEqual(dummy_constant, "\nCONSTANT = None\n") dummy_function = create_dummy_object("function", "'torch'") self.assertEqual( dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) expected_dummy_class = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') """ dummy_class = create_dummy_object("FakeClass", 
"'torch'") self.assertEqual(dummy_class, expected_dummy_class) def test_create_dummy_files(self): expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit. # flake8: noqa from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) """ dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/squeezebert/tokenization_squeezebert.py
# coding=utf-8 # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for SqueezeBERT.""" import collections import os import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } PRETRAINED_INIT_CONFIGURATION = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } # Copied from transformers.models.bert.tokenization_bert.load_vocab def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with Bert->SqueezeBert,BERT->SqueezeBERT class SqueezeBertTokenizer(PreTrainedTokenizer): r""" Construct a SqueezeBERT tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original SqueezeBERT). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs ): super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained" " model use `tokenizer = SqueezeBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A SqueezeBERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. 
""" # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
# coding=utf-8 # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for SqueezeBERT.""" import collections import os import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } PRETRAINED_INIT_CONFIGURATION = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } # Copied from transformers.models.bert.tokenization_bert.load_vocab def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with Bert->SqueezeBert,BERT->SqueezeBERT class SqueezeBertTokenizer(PreTrainedTokenizer): r""" Construct a SqueezeBERT tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original SqueezeBERT). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs ): super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained" " model use `tokenizer = SqueezeBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A SqueezeBERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. 
""" # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
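The `WordpieceTokenizer.tokenize` docstring above describes a greedy longest-match-first lookup (`"unaffable"` becomes `["un", "##aff", "##able"]`). Below is a minimal, self-contained sketch of that loop; the toy vocabulary and the helper name `greedy_wordpiece` are assumptions made for illustration and are not part of the SqueezeBERT files.

```python
# Minimal sketch of the greedy longest-match-first WordPiece lookup described above.
# The toy vocabulary is an assumption for illustration only.
vocab = {"un", "##aff", "##able"}

def greedy_wordpiece(token, vocab, unk_token="[UNK]"):
    pieces, start = [], 0
    while start < len(token):
        end = len(token)
        cur_substr = None
        while start < end:
            substr = token[start:end]
            if start > 0:
                substr = "##" + substr  # continuation pieces carry the "##" prefix
            if substr in vocab:
                cur_substr = substr  # longest match wins: `end` shrinks from the right
                break
            end -= 1
        if cur_substr is None:
            return [unk_token]  # no piece matched: fall back to the unknown token
        pieces.append(cur_substr)
        start = end
    return pieces

print(greedy_wordpiece("unaffable", vocab))  # ['un', '##aff', '##able']
```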
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./docs/source/en/model_doc/xlnet.mdx
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # XLNet ## Overview The XLNet model was proposed in [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le. XLnet is an extension of the Transformer-XL model pre-trained using an autoregressive method to learn bidirectional contexts by maximizing the expected likelihood over all permutations of the input sequence factorization order. The abstract from the paper is the following: *With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.* Tips: - The specific attention pattern can be controlled at training and test time using the `perm_mask` input. - Due to the difficulty of training a fully auto-regressive model over various factorization order, XLNet is pretrained using only a sub-set of the output tokens as target which are selected with the `target_mapping` input. - To use XLNet for sequential decoding (i.e. not in fully bi-directional setting), use the `perm_mask` and `target_mapping` inputs to control the attention span and outputs (see examples in *examples/pytorch/text-generation/run_generation.py*) - XLNet is one of the few models that has no sequence length limit. This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/zihangdai/xlnet/). 
## XLNetConfig [[autodoc]] XLNetConfig ## XLNetTokenizer [[autodoc]] XLNetTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## XLNetTokenizerFast [[autodoc]] XLNetTokenizerFast ## XLNet specific outputs [[autodoc]] models.xlnet.modeling_xlnet.XLNetModelOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput ## XLNetModel [[autodoc]] XLNetModel - forward ## XLNetLMHeadModel [[autodoc]] XLNetLMHeadModel - forward ## XLNetForSequenceClassification [[autodoc]] XLNetForSequenceClassification - forward ## XLNetForMultipleChoice [[autodoc]] XLNetForMultipleChoice - forward ## XLNetForTokenClassification [[autodoc]] XLNetForTokenClassification - forward ## XLNetForQuestionAnsweringSimple [[autodoc]] XLNetForQuestionAnsweringSimple - forward ## XLNetForQuestionAnswering [[autodoc]] XLNetForQuestionAnswering - forward ## TFXLNetModel [[autodoc]] TFXLNetModel - call ## TFXLNetLMHeadModel [[autodoc]] TFXLNetLMHeadModel - call ## TFXLNetForSequenceClassification [[autodoc]] TFXLNetForSequenceClassification - call ## TFLNetForMultipleChoice [[autodoc]] TFXLNetForMultipleChoice - call ## TFXLNetForTokenClassification [[autodoc]] TFXLNetForTokenClassification - call ## TFXLNetForQuestionAnsweringSimple [[autodoc]] TFXLNetForQuestionAnsweringSimple - call
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # XLNet ## Overview The XLNet model was proposed in [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le. XLnet is an extension of the Transformer-XL model pre-trained using an autoregressive method to learn bidirectional contexts by maximizing the expected likelihood over all permutations of the input sequence factorization order. The abstract from the paper is the following: *With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.* Tips: - The specific attention pattern can be controlled at training and test time using the `perm_mask` input. - Due to the difficulty of training a fully auto-regressive model over various factorization order, XLNet is pretrained using only a sub-set of the output tokens as target which are selected with the `target_mapping` input. - To use XLNet for sequential decoding (i.e. not in fully bi-directional setting), use the `perm_mask` and `target_mapping` inputs to control the attention span and outputs (see examples in *examples/pytorch/text-generation/run_generation.py*) - XLNet is one of the few models that has no sequence length limit. This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/zihangdai/xlnet/). 
## XLNetConfig [[autodoc]] XLNetConfig ## XLNetTokenizer [[autodoc]] XLNetTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## XLNetTokenizerFast [[autodoc]] XLNetTokenizerFast ## XLNet specific outputs [[autodoc]] models.xlnet.modeling_xlnet.XLNetModelOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput ## XLNetModel [[autodoc]] XLNetModel - forward ## XLNetLMHeadModel [[autodoc]] XLNetLMHeadModel - forward ## XLNetForSequenceClassification [[autodoc]] XLNetForSequenceClassification - forward ## XLNetForMultipleChoice [[autodoc]] XLNetForMultipleChoice - forward ## XLNetForTokenClassification [[autodoc]] XLNetForTokenClassification - forward ## XLNetForQuestionAnsweringSimple [[autodoc]] XLNetForQuestionAnsweringSimple - forward ## XLNetForQuestionAnswering [[autodoc]] XLNetForQuestionAnswering - forward ## TFXLNetModel [[autodoc]] TFXLNetModel - call ## TFXLNetLMHeadModel [[autodoc]] TFXLNetLMHeadModel - call ## TFXLNetForSequenceClassification [[autodoc]] TFXLNetForSequenceClassification - call ## TFLNetForMultipleChoice [[autodoc]] TFXLNetForMultipleChoice - call ## TFXLNetForTokenClassification [[autodoc]] TFXLNetForTokenClassification - call ## TFXLNetForQuestionAnsweringSimple [[autodoc]] TFXLNetForQuestionAnsweringSimple - call
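The tips in the XLNet page above mention steering decoding with `perm_mask` and `target_mapping` without spelling out the tensors. Below is a hedged sketch of that pattern; it assumes the public `xlnet-base-cased` checkpoint is reachable, and the prompt text is arbitrary, so treat it as an illustration of the tensor shapes rather than an excerpt from the docs.

```python
# Sketch: predict only the final (masked) position with XLNet, controlling attention
# via `perm_mask` and selecting the prediction target via `target_mapping`.
import torch
from transformers import XLNetLMHeadModel, XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
model = XLNetLMHeadModel.from_pretrained("xlnet-base-cased")

input_ids = torch.tensor(
    tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
).unsqueeze(0)
seq_len = input_ids.shape[1]

# No token may attend to the last position (the one we want to predict).
perm_mask = torch.zeros((1, seq_len, seq_len), dtype=torch.float)
perm_mask[:, :, -1] = 1.0

# Produce a prediction for that single position only.
target_mapping = torch.zeros((1, 1, seq_len), dtype=torch.float)
target_mapping[0, 0, -1] = 1.0

outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
next_token_logits = outputs.logits[0, 0]  # shape: (vocab_size,)
print(tokenizer.decode([int(next_token_logits.argmax())]))
```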
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./.git/objects/2b/1b9f5a50b6ef369085b756c0aab188e24b580a
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./templates/adding_a_new_model/cookiecutter.json
{ "modelname": "BrandNewBERT", "uppercase_modelname": "BRAND_NEW_BERT", "lowercase_modelname": "brand_new_bert", "camelcase_modelname": "BrandNewBert", "authors": "The HuggingFace Team", "checkpoint_identifier": "brand-new-bert-base-cased", "tokenizer_type": ["Based on BERT", "Based on BART", "Standalone"], "generate_tensorflow_pytorch_and_flax": [ "PyTorch, TensorFlow and Flax", "PyTorch & TensorFlow", "PyTorch & Flax", "TensorFlow & Flax", "PyTorch", "TensorFlow", "Flax" ], "is_encoder_decoder_model": ["True", "False"] }
{ "modelname": "BrandNewBERT", "uppercase_modelname": "BRAND_NEW_BERT", "lowercase_modelname": "brand_new_bert", "camelcase_modelname": "BrandNewBert", "authors": "The HuggingFace Team", "checkpoint_identifier": "brand-new-bert-base-cased", "tokenizer_type": ["Based on BERT", "Based on BART", "Standalone"], "generate_tensorflow_pytorch_and_flax": [ "PyTorch, TensorFlow and Flax", "PyTorch & TensorFlow", "PyTorch & Flax", "TensorFlow & Flax", "PyTorch", "TensorFlow", "Flax" ], "is_encoder_decoder_model": ["True", "False"] }
-1
huggingface/transformers
20,307
Remove double brackets
Fixes a small typo in the pipeline docs where there were two brackets.
stevhliu
"2022-11-17T19:40:39Z"
"2022-11-18T17:29:24Z"
f10cdba22e1a91a8f0774b75de3d2a3826ecb8cc
b2c863a3196150850d17548f25ee0575bccb8224
Remove double brackets. Fixes a small typo in the pipeline docs where there were two brackets.
./src/transformers/models/rembert/convert_rembert_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert RemBERT checkpoint.""" import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path): # Initialise PyTorch model config = RemBertConfig.from_json_file(bert_config_file) print("Building PyTorch model from configuration: {}".format(str(config))) model = RemBertModel(config) # Load weights from tf checkpoint load_tf_weights_in_rembert(model, config, tf_checkpoint_path) # Save pytorch-model print("Save PyTorch model to {}".format(pytorch_dump_path)) torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--rembert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained RemBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert RemBERT checkpoint.""" import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path): # Initialise PyTorch model config = RemBertConfig.from_json_file(bert_config_file) print("Building PyTorch model from configuration: {}".format(str(config))) model = RemBertModel(config) # Load weights from tf checkpoint load_tf_weights_in_rembert(model, config, tf_checkpoint_path) # Save pytorch-model print("Save PyTorch model to {}".format(pytorch_dump_path)) torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--rembert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained RemBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
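A hedged usage sketch for the conversion script above follows. The three paths are placeholders (no such files ship with the repository), and running it also requires TensorFlow, since the weights are read from a TF checkpoint.

```python
# Illustrative call into the conversion function defined above; all paths are placeholders.
from transformers.models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
    convert_rembert_tf_checkpoint_to_pytorch,
)

convert_rembert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="path/to/rembert/model.ckpt",  # TF checkpoint prefix
    bert_config_file="path/to/rembert_config.json",   # architecture config
    pytorch_dump_path="path/to/pytorch_model.bin",    # where to write the converted weights
)
```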
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
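The description names the problem but not the change; below is a minimal sketch of the kind of fix it implies. It is written for illustration, not copied from the actual patch, and `out_bbox` stands in for whatever prediction tensor the post-processing method later scales.

```python
# Build the per-image scaling tensor on the same device as the predictions,
# so that multiplying boxes by `scale_fct` does not mix cpu and cuda tensors.
import torch

def build_scale_fct(target_sizes, out_bbox):
    if isinstance(target_sizes, list):
        # Tensors created from Python lists start out on the cpu.
        img_h = torch.Tensor([size[0] for size in target_sizes])
        img_w = torch.Tensor([size[1] for size in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)
    return torch.stack([img_w, img_h, img_w, img_h], dim=1).to(out_bbox.device)
```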
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for Conditional DETR.""" import pathlib import warnings from typing import Dict, List, Optional, Set, Tuple, Union import numpy as np from PIL import Image from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...image_transforms import center_to_corners_format, corners_to_center_format, rgb_to_id from ...image_utils import ImageFeatureExtractionMixin from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) ImageInput = Union[Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]] # Copied from transformers.models.detr.feature_extraction_detr.masks_to_boxes def masks_to_boxes(masks): """ Compute the bounding boxes around the provided panoptic segmentation masks. The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.feature_extraction_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Args: Converts given binary mask of shape (height, width) to the run-length encoding (RLE) format. mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return [x for x in runs] # Copied from transformers.models.detr.feature_extraction_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape (height, width) to the run-length encoding (RLE) format. 
Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.feature_extraction_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. """ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.feature_extraction_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.feature_extraction_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = 
stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments class ConditionalDetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): r""" Constructs a Conditional DETR feature extractor. This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to `1333`): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. 
""" model_input_names = ["pixel_values", "pixel_mask"] # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__init__ def __init__( self, format="coco_detection", do_resize=True, size=800, max_size=1333, do_normalize=True, image_mean=None, image_std=None, **kwargs ): super().__init__(**kwargs) self.format = self._is_valid_format(format) self.do_resize = do_resize self.size = size self.max_size = max_size self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [0.485, 0.456, 0.406] # ImageNet mean self.image_std = image_std if image_std is not None else [0.229, 0.224, 0.225] # ImageNet std # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._is_valid_format def _is_valid_format(self, format): if format not in ["coco_detection", "coco_panoptic"]: raise ValueError(f"Format {format} not supported") return format # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): if self.format == "coco_detection": image, target = self.prepare_coco_detection(image, target, return_segmentation_masks) return image, target elif self.format == "coco_panoptic": image, target = self.prepare_coco_panoptic(image, target, masks_path) return image, target else: raise ValueError(f"Format {self.format} not supported") # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, segmentations, height, width): try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_detection with DETR->ConditionalDETR def prepare_coco_detection(self, image, target, return_segmentation_masks=False): """ Convert the target in COCO format into the format expected by ConditionalDETR. 
""" w, h = image.size image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # get all COCO annotations for the given image anno = target["annotations"] anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0] boxes = [obj["bbox"] for obj in anno] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h) classes = [obj["category_id"] for obj in anno] classes = np.asarray(classes, dtype=np.int64) if return_segmentation_masks: segmentations = [obj["segmentation"] for obj in anno] masks = self.convert_coco_poly_to_mask(segmentations, h, w) keypoints = None if anno and "keypoints" in anno[0]: keypoints = [obj["keypoints"] for obj in anno] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] if num_keypoints: keypoints = keypoints.reshape((-1, 3)) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) boxes = boxes[keep] classes = classes[keep] if return_segmentation_masks: masks = masks[keep] if keypoints is not None: keypoints = keypoints[keep] target = {} target["boxes"] = boxes target["class_labels"] = classes if return_segmentation_masks: target["masks"] = masks target["image_id"] = image_id if keypoints is not None: target["keypoints"] = keypoints # for conversion to coco api area = np.asarray([obj["area"] for obj in anno], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64) target["area"] = area[keep] target["iscrowd"] = iscrowd[keep] target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_panoptic def prepare_coco_panoptic(self, image, target, masks_path, return_masks=True): w, h = image.size ann_info = target.copy() ann_path = pathlib.Path(masks_path) / ann_info["file_name"] if "segments_info" in ann_info: masks = np.asarray(Image.open(ann_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([ann["id"] for ann in ann_info["segments_info"]]) masks = masks == ids[:, None, None] masks = np.asarray(masks, dtype=np.uint8) labels = np.asarray([ann["category_id"] for ann in ann_info["segments_info"]], dtype=np.int64) target = {} target["image_id"] = np.asarray( [ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]], dtype=np.int64 ) if return_masks: target["masks"] = masks target["class_labels"] = labels target["boxes"] = masks_to_boxes(masks) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) if "segments_info" in ann_info: target["iscrowd"] = np.asarray([ann["iscrowd"] for ann in ann_info["segments_info"]], dtype=np.int64) target["area"] = np.asarray([ann["area"] for ann in ann_info["segments_info"]], dtype=np.float32) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._resize def _resize(self, image, size, target=None, max_size=None): """ Resize the image to the given size. Size can be min_size (scalar) or (w, h) tuple. If size is an int, smaller edge of the image will be matched to this number. If given, also resize the target accordingly. 
""" if not isinstance(image, Image.Image): image = self.to_pil_image(image) def get_size_with_aspect_ratio(image_size, size, max_size=None): w, h = image_size if max_size is not None: min_original_size = float(min((w, h))) max_original_size = float(max((w, h))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (w <= h and w == size) or (h <= w and h == size): return (h, w) if w < h: ow = size oh = int(size * h / w) else: oh = size ow = int(size * w / h) return (oh, ow) def get_size(image_size, size, max_size=None): if isinstance(size, (list, tuple)): return size else: # size returned must be (w, h) since we use PIL to resize images # so we revert the tuple return get_size_with_aspect_ratio(image_size, size, max_size)[::-1] size = get_size(image.size, size, max_size) rescaled_image = self.resize(image, size=size) if target is None: return rescaled_image, None ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) ratio_width, ratio_height = ratios target = target.copy() if "boxes" in target: boxes = target["boxes"] scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) target["boxes"] = scaled_boxes if "area" in target: area = target["area"] scaled_area = area * (ratio_width * ratio_height) target["area"] = scaled_area w, h = size target["size"] = np.asarray([h, w], dtype=np.int64) if "masks" in target: # use PyTorch as current workaround # TODO replace by self.resize masks = torch.from_numpy(target["masks"][:, None]).float() interpolated_masks = nn.functional.interpolate(masks, size=(h, w), mode="nearest")[:, 0] > 0.5 target["masks"] = interpolated_masks.numpy() return rescaled_image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._normalize def _normalize(self, image, mean, std, target=None): """ Normalize the image with a certain mean and std. If given, also normalize the target bounding boxes based on the size of the image. """ image = self.normalize(image, mean=mean, std=std) if target is None: return image, None target = target.copy() h, w = image.shape[-2:] if "boxes" in target: boxes = target["boxes"] boxes = corners_to_center_format(boxes) boxes = boxes / np.asarray([w, h, w, h], dtype=np.float32) target["boxes"] = boxes return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__call__ with Detr->ConditionalDetr,DETR->ConditionalDETR def __call__( self, images: ImageInput, annotations: Union[List[Dict], List[List[Dict]]] = None, return_segmentation_masks: Optional[bool] = False, masks_path: Optional[pathlib.Path] = None, pad_and_return_pixel_mask: Optional[bool] = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to prepare for the model one or several image(s) and optional annotations. Images are by default padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are real/which are padding. <Tip warning={true}> NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images. </Tip> Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. annotations (`Dict`, `List[Dict]`, *optional*): The corresponding annotations in COCO format. In case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_detection"`, the annotations for each image should have the following format: {'image_id': int, 'annotations': [annotation]}, with the annotations being a list of COCO object annotations. In case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`, the annotations for each image should have the following format: {'image_id': int, 'file_name': str, 'segments_info': [segment_info]} with segments_info being a list of COCO panoptic annotations. return_segmentation_masks (`Dict`, `List[Dict]`, *optional*, defaults to `False`): Whether to also include instance segmentation masks as part of the labels in case `format = "coco_detection"`. masks_path (`pathlib.Path`, *optional*): Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only relevant in case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`. pad_and_return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). - **labels** -- Optional labels to be fed to a model (when `annotations` are provided) """ # Input type checking for clearer error valid_images = False valid_annotations = False valid_masks_path = False # Check that images has a valid type if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): valid_images = True elif isinstance(images, (list, tuple)): if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): valid_images = True if not valid_images: raise ValueError( "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), " "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." ) is_batched = bool( isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) ) # Check that annotations has a valid type if annotations is not None: if not is_batched: if self.format == "coco_detection": if isinstance(annotations, dict) and "image_id" in annotations and "annotations" in annotations: if isinstance(annotations["annotations"], (list, tuple)): # an image can have no annotations if len(annotations["annotations"]) == 0 or isinstance(annotations["annotations"][0], dict): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations, dict) and "image_id" in annotations and "segments_info" in annotations: if isinstance(annotations["segments_info"], (list, tuple)): # an image can have no segments (?) 
if len(annotations["segments_info"]) == 0 or isinstance( annotations["segments_info"][0], dict ): valid_annotations = True else: if isinstance(annotations, (list, tuple)): if len(images) != len(annotations): raise ValueError("There must be as many annotations as there are images") if isinstance(annotations[0], Dict): if self.format == "coco_detection": if isinstance(annotations[0]["annotations"], (list, tuple)): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations[0]["segments_info"], (list, tuple)): valid_annotations = True if not valid_annotations: raise ValueError( """ Annotations must of type `Dict` (single image) or `List[Dict]` (batch of images). In case of object detection, each dictionary should contain the keys 'image_id' and 'annotations', with the latter being a list of annotations in COCO format. In case of panoptic segmentation, each dictionary should contain the keys 'file_name', 'image_id' and 'segments_info', with the latter being a list of annotations in COCO format. """ ) # Check that masks_path has a valid type if masks_path is not None: if self.format == "coco_panoptic": if isinstance(masks_path, pathlib.Path): valid_masks_path = True if not valid_masks_path: raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" " `pathlib.Path` object." ) if not is_batched: images = [images] if annotations is not None: annotations = [annotations] # Create a copy of the list to avoid editing it in place images = [image for image in images] if annotations is not None: annotations = [annotation for annotation in annotations] # prepare (COCO annotations as a list of Dict -> ConditionalDETR target as a single Dict per image) if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): if not isinstance(image, Image.Image): image = self.to_pil_image(image) image, target = self.prepare(image, target, return_segmentation_masks, masks_path) images[idx] = image annotations[idx] = target # transformations (resizing + normalization) if self.do_resize and self.size is not None: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._resize(image=image, target=target, size=self.size, max_size=self.max_size) images[idx] = image annotations[idx] = target else: for idx, image in enumerate(images): images[idx] = self._resize(image=image, target=None, size=self.size, max_size=self.max_size)[0] if self.do_normalize: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._normalize( image=image, mean=self.image_mean, std=self.image_std, target=target ) images[idx] = image annotations[idx] = target else: images = [ self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images ] else: images = [np.array(image) for image in images] if pad_and_return_pixel_mask: # pad images up to largest image in batch and create pixel_mask max_size = self._max_by_axis([list(image.shape) for image in images]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in images: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) images = padded_images # return as BatchFeature data = {} 
data["pixel_values"] = images if pad_and_return_pixel_mask: data["pixel_mask"] = pixel_mask encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: # Convert to TensorType tensor_type = return_tensors if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) if not tensor_type == TensorType.PYTORCH: raise ValueError("Only PyTorch is supported for the moment.") else: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") encoded_inputs["labels"] = [ {k: torch.from_numpy(v) for k, v in target.items()} for target in annotations ] return encoded_inputs # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._max_by_axis def _max_by_axis(self, the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.pad_and_create_pixel_mask def pad_and_create_pixel_mask( self, pixel_values_list: List["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]] = None ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. Args: pixel_values_list (`List[torch.Tensor]`): List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). """ max_size = self._max_by_axis([list(image.shape) for image in pixel_values_list]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in pixel_values_list: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) # return as BatchFeature data = {"pixel_values": padded_images, "pixel_mask": pixel_mask} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs def post_process(self, outputs, target_sizes): """ Args: Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. outputs ([`ConditionalDetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results # Copied from transformers.models.deformable_detr.feature_extraction_deformable_detr.DeformableDetrFeatureExtractor.post_process_object_detection with DeformableDetr->ConditionalDetr def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*, defaults to `None`): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_semantic_segmentation with Detr->ConditionalDetr def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None): """ Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*, defaults to `None`): A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_instance_segmentation with Detr->ConditionalDetr def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional*): Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=[], target_size=target_size, ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({"segmentation": segmentation, "segments_info": segments}) return results # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_panoptic_segmentation with Detr->ConditionalDetr def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): The outputs from [`ConditionalDetrForSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or `None` if no mask if found above `threshold`. 
If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: warnings.warn("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for Conditional DETR.""" import pathlib import warnings from typing import Dict, List, Optional, Set, Tuple, Union import numpy as np from PIL import Image from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...image_transforms import center_to_corners_format, corners_to_center_format, rgb_to_id from ...image_utils import ImageFeatureExtractionMixin from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) ImageInput = Union[Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]] # Copied from transformers.models.detr.feature_extraction_detr.masks_to_boxes def masks_to_boxes(masks): """ Compute the bounding boxes around the provided panoptic segmentation masks. The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.feature_extraction_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Args: Converts given binary mask of shape (height, width) to the run-length encoding (RLE) format. mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return [x for x in runs] # Copied from transformers.models.detr.feature_extraction_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape (height, width) to the run-length encoding (RLE) format. 
Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.feature_extraction_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. """ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.feature_extraction_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.feature_extraction_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = 
stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments class ConditionalDetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): r""" Constructs a Conditional DETR feature extractor. This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to `1333`): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. 
""" model_input_names = ["pixel_values", "pixel_mask"] # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__init__ def __init__( self, format="coco_detection", do_resize=True, size=800, max_size=1333, do_normalize=True, image_mean=None, image_std=None, **kwargs ): super().__init__(**kwargs) self.format = self._is_valid_format(format) self.do_resize = do_resize self.size = size self.max_size = max_size self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [0.485, 0.456, 0.406] # ImageNet mean self.image_std = image_std if image_std is not None else [0.229, 0.224, 0.225] # ImageNet std # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._is_valid_format def _is_valid_format(self, format): if format not in ["coco_detection", "coco_panoptic"]: raise ValueError(f"Format {format} not supported") return format # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): if self.format == "coco_detection": image, target = self.prepare_coco_detection(image, target, return_segmentation_masks) return image, target elif self.format == "coco_panoptic": image, target = self.prepare_coco_panoptic(image, target, masks_path) return image, target else: raise ValueError(f"Format {self.format} not supported") # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, segmentations, height, width): try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_detection with DETR->ConditionalDETR def prepare_coco_detection(self, image, target, return_segmentation_masks=False): """ Convert the target in COCO format into the format expected by ConditionalDETR. 
""" w, h = image.size image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # get all COCO annotations for the given image anno = target["annotations"] anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0] boxes = [obj["bbox"] for obj in anno] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h) classes = [obj["category_id"] for obj in anno] classes = np.asarray(classes, dtype=np.int64) if return_segmentation_masks: segmentations = [obj["segmentation"] for obj in anno] masks = self.convert_coco_poly_to_mask(segmentations, h, w) keypoints = None if anno and "keypoints" in anno[0]: keypoints = [obj["keypoints"] for obj in anno] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] if num_keypoints: keypoints = keypoints.reshape((-1, 3)) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) boxes = boxes[keep] classes = classes[keep] if return_segmentation_masks: masks = masks[keep] if keypoints is not None: keypoints = keypoints[keep] target = {} target["boxes"] = boxes target["class_labels"] = classes if return_segmentation_masks: target["masks"] = masks target["image_id"] = image_id if keypoints is not None: target["keypoints"] = keypoints # for conversion to coco api area = np.asarray([obj["area"] for obj in anno], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64) target["area"] = area[keep] target["iscrowd"] = iscrowd[keep] target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_panoptic def prepare_coco_panoptic(self, image, target, masks_path, return_masks=True): w, h = image.size ann_info = target.copy() ann_path = pathlib.Path(masks_path) / ann_info["file_name"] if "segments_info" in ann_info: masks = np.asarray(Image.open(ann_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([ann["id"] for ann in ann_info["segments_info"]]) masks = masks == ids[:, None, None] masks = np.asarray(masks, dtype=np.uint8) labels = np.asarray([ann["category_id"] for ann in ann_info["segments_info"]], dtype=np.int64) target = {} target["image_id"] = np.asarray( [ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]], dtype=np.int64 ) if return_masks: target["masks"] = masks target["class_labels"] = labels target["boxes"] = masks_to_boxes(masks) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) if "segments_info" in ann_info: target["iscrowd"] = np.asarray([ann["iscrowd"] for ann in ann_info["segments_info"]], dtype=np.int64) target["area"] = np.asarray([ann["area"] for ann in ann_info["segments_info"]], dtype=np.float32) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._resize def _resize(self, image, size, target=None, max_size=None): """ Resize the image to the given size. Size can be min_size (scalar) or (w, h) tuple. If size is an int, smaller edge of the image will be matched to this number. If given, also resize the target accordingly. 
""" if not isinstance(image, Image.Image): image = self.to_pil_image(image) def get_size_with_aspect_ratio(image_size, size, max_size=None): w, h = image_size if max_size is not None: min_original_size = float(min((w, h))) max_original_size = float(max((w, h))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (w <= h and w == size) or (h <= w and h == size): return (h, w) if w < h: ow = size oh = int(size * h / w) else: oh = size ow = int(size * w / h) return (oh, ow) def get_size(image_size, size, max_size=None): if isinstance(size, (list, tuple)): return size else: # size returned must be (w, h) since we use PIL to resize images # so we revert the tuple return get_size_with_aspect_ratio(image_size, size, max_size)[::-1] size = get_size(image.size, size, max_size) rescaled_image = self.resize(image, size=size) if target is None: return rescaled_image, None ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) ratio_width, ratio_height = ratios target = target.copy() if "boxes" in target: boxes = target["boxes"] scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) target["boxes"] = scaled_boxes if "area" in target: area = target["area"] scaled_area = area * (ratio_width * ratio_height) target["area"] = scaled_area w, h = size target["size"] = np.asarray([h, w], dtype=np.int64) if "masks" in target: # use PyTorch as current workaround # TODO replace by self.resize masks = torch.from_numpy(target["masks"][:, None]).float() interpolated_masks = nn.functional.interpolate(masks, size=(h, w), mode="nearest")[:, 0] > 0.5 target["masks"] = interpolated_masks.numpy() return rescaled_image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._normalize def _normalize(self, image, mean, std, target=None): """ Normalize the image with a certain mean and std. If given, also normalize the target bounding boxes based on the size of the image. """ image = self.normalize(image, mean=mean, std=std) if target is None: return image, None target = target.copy() h, w = image.shape[-2:] if "boxes" in target: boxes = target["boxes"] boxes = corners_to_center_format(boxes) boxes = boxes / np.asarray([w, h, w, h], dtype=np.float32) target["boxes"] = boxes return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__call__ with Detr->ConditionalDetr,DETR->ConditionalDETR def __call__( self, images: ImageInput, annotations: Union[List[Dict], List[List[Dict]]] = None, return_segmentation_masks: Optional[bool] = False, masks_path: Optional[pathlib.Path] = None, pad_and_return_pixel_mask: Optional[bool] = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to prepare for the model one or several image(s) and optional annotations. Images are by default padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are real/which are padding. <Tip warning={true}> NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images. </Tip> Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. annotations (`Dict`, `List[Dict]`, *optional*): The corresponding annotations in COCO format. In case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_detection"`, the annotations for each image should have the following format: {'image_id': int, 'annotations': [annotation]}, with the annotations being a list of COCO object annotations. In case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`, the annotations for each image should have the following format: {'image_id': int, 'file_name': str, 'segments_info': [segment_info]} with segments_info being a list of COCO panoptic annotations. return_segmentation_masks (`Dict`, `List[Dict]`, *optional*, defaults to `False`): Whether to also include instance segmentation masks as part of the labels in case `format = "coco_detection"`. masks_path (`pathlib.Path`, *optional*): Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only relevant in case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`. pad_and_return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). - **labels** -- Optional labels to be fed to a model (when `annotations` are provided) """ # Input type checking for clearer error valid_images = False valid_annotations = False valid_masks_path = False # Check that images has a valid type if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): valid_images = True elif isinstance(images, (list, tuple)): if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): valid_images = True if not valid_images: raise ValueError( "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), " "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." ) is_batched = bool( isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) ) # Check that annotations has a valid type if annotations is not None: if not is_batched: if self.format == "coco_detection": if isinstance(annotations, dict) and "image_id" in annotations and "annotations" in annotations: if isinstance(annotations["annotations"], (list, tuple)): # an image can have no annotations if len(annotations["annotations"]) == 0 or isinstance(annotations["annotations"][0], dict): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations, dict) and "image_id" in annotations and "segments_info" in annotations: if isinstance(annotations["segments_info"], (list, tuple)): # an image can have no segments (?) 
if len(annotations["segments_info"]) == 0 or isinstance( annotations["segments_info"][0], dict ): valid_annotations = True else: if isinstance(annotations, (list, tuple)): if len(images) != len(annotations): raise ValueError("There must be as many annotations as there are images") if isinstance(annotations[0], Dict): if self.format == "coco_detection": if isinstance(annotations[0]["annotations"], (list, tuple)): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations[0]["segments_info"], (list, tuple)): valid_annotations = True if not valid_annotations: raise ValueError( """ Annotations must of type `Dict` (single image) or `List[Dict]` (batch of images). In case of object detection, each dictionary should contain the keys 'image_id' and 'annotations', with the latter being a list of annotations in COCO format. In case of panoptic segmentation, each dictionary should contain the keys 'file_name', 'image_id' and 'segments_info', with the latter being a list of annotations in COCO format. """ ) # Check that masks_path has a valid type if masks_path is not None: if self.format == "coco_panoptic": if isinstance(masks_path, pathlib.Path): valid_masks_path = True if not valid_masks_path: raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" " `pathlib.Path` object." ) if not is_batched: images = [images] if annotations is not None: annotations = [annotations] # Create a copy of the list to avoid editing it in place images = [image for image in images] if annotations is not None: annotations = [annotation for annotation in annotations] # prepare (COCO annotations as a list of Dict -> ConditionalDETR target as a single Dict per image) if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): if not isinstance(image, Image.Image): image = self.to_pil_image(image) image, target = self.prepare(image, target, return_segmentation_masks, masks_path) images[idx] = image annotations[idx] = target # transformations (resizing + normalization) if self.do_resize and self.size is not None: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._resize(image=image, target=target, size=self.size, max_size=self.max_size) images[idx] = image annotations[idx] = target else: for idx, image in enumerate(images): images[idx] = self._resize(image=image, target=None, size=self.size, max_size=self.max_size)[0] if self.do_normalize: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._normalize( image=image, mean=self.image_mean, std=self.image_std, target=target ) images[idx] = image annotations[idx] = target else: images = [ self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images ] else: images = [np.array(image) for image in images] if pad_and_return_pixel_mask: # pad images up to largest image in batch and create pixel_mask max_size = self._max_by_axis([list(image.shape) for image in images]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in images: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) images = padded_images # return as BatchFeature data = {} 
data["pixel_values"] = images if pad_and_return_pixel_mask: data["pixel_mask"] = pixel_mask encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: # Convert to TensorType tensor_type = return_tensors if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) if not tensor_type == TensorType.PYTORCH: raise ValueError("Only PyTorch is supported for the moment.") else: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") encoded_inputs["labels"] = [ {k: torch.from_numpy(v) for k, v in target.items()} for target in annotations ] return encoded_inputs # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._max_by_axis def _max_by_axis(self, the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.pad_and_create_pixel_mask def pad_and_create_pixel_mask( self, pixel_values_list: List["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]] = None ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. Args: pixel_values_list (`List[torch.Tensor]`): List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). """ max_size = self._max_by_axis([list(image.shape) for image in pixel_values_list]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in pixel_values_list: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) # return as BatchFeature data = {"pixel_values": padded_images, "pixel_mask": pixel_mask} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs def post_process(self, outputs, target_sizes): """ Args: Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. outputs ([`ConditionalDetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results # Copied from transformers.models.deformable_detr.feature_extraction_deformable_detr.DeformableDetrFeatureExtractor.post_process_object_detection with DeformableDetr->ConditionalDetr def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*, defaults to `None`): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_semantic_segmentation with Detr->ConditionalDetr def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None): """ Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*, defaults to `None`): A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_instance_segmentation with Detr->ConditionalDetr def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional*): Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=[], target_size=target_size, ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({"segmentation": segmentation, "segments_info": segments}) return results # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_panoptic_segmentation with Detr->ConditionalDetr def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): The outputs from [`ConditionalDetrForSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or `None` if no mask if found above `threshold`. 
If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: warnings.warn("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
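For context, a minimal usage sketch of the post-processing API documented in the file above. The checkpoint name (`microsoft/conditional-detr-resnet-50`) and the local `example.jpg` image are assumptions for illustration, not values taken from this record:

```python
import torch
from PIL import Image
from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrForObjectDetection

# Assumed checkpoint; swap in any Conditional DETR object-detection checkpoint.
checkpoint = "microsoft/conditional-detr-resnet-50"
feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained(checkpoint)
model = ConditionalDetrForObjectDetection.from_pretrained(checkpoint)

image = Image.open("example.jpg").convert("RGB")  # any local RGB image (assumed path)
inputs = feature_extractor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# target_sizes can be a list of (height, width) tuples or a (batch_size, 2) tensor.
results = feature_extractor.post_process_object_detection(
    outputs, threshold=0.5, target_sizes=[(image.height, image.width)]
)[0]
print(results["scores"], results["labels"], results["boxes"])
```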
1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run: ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run: ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
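For reference, a minimal sketch of the one-line device fix this record describes, factored into a hypothetical `rescale_boxes` helper (the helper name and standalone form are assumptions; in the actual file the logic lives inline in `post_process_object_detection`):

```python
import torch

def rescale_boxes(boxes: torch.Tensor, target_sizes) -> torch.Tensor:
    """Scale relative [0, 1] corner boxes to absolute pixel coordinates."""
    if isinstance(target_sizes, list):
        # torch.Tensor(...) always allocates on CPU, which is the root of the bug.
        img_h = torch.Tensor([size[0] for size in target_sizes])
        img_w = torch.Tensor([size[1] for size in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)
    # The fix: move scale_fct to the device of `boxes` so the multiplication
    # also works when the model output lives on GPU.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
    return boxes * scale_fct[:, None, :]
```

Without the `.to(boxes.device)` call, `scale_fct` stays on CPU and multiplying it with CUDA `boxes` raises a device-mismatch error, which is the change between the two file versions stored in this record.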
./src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for Deformable DETR.""" import pathlib import warnings from typing import Dict, List, Optional, Tuple, Union import numpy as np from PIL import Image from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...image_transforms import center_to_corners_format, corners_to_center_format, rgb_to_id from ...image_utils import ImageFeatureExtractionMixin from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) ImageInput = Union[Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]] # Copied from transformers.models.detr.feature_extraction_detr.masks_to_boxes def masks_to_boxes(masks): """ Compute the bounding boxes around the provided panoptic segmentation masks. The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) class DeformableDetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): r""" Constructs a Deformable DETR feature extractor. Differs only in the postprocessing of object detection compared to DETR. This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. 
max_size (`int`, *optional*, defaults to 1333): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. """ model_input_names = ["pixel_values", "pixel_mask"] # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__init__ def __init__( self, format="coco_detection", do_resize=True, size=800, max_size=1333, do_normalize=True, image_mean=None, image_std=None, **kwargs ): super().__init__(**kwargs) self.format = self._is_valid_format(format) self.do_resize = do_resize self.size = size self.max_size = max_size self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [0.485, 0.456, 0.406] # ImageNet mean self.image_std = image_std if image_std is not None else [0.229, 0.224, 0.225] # ImageNet std # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._is_valid_format def _is_valid_format(self, format): if format not in ["coco_detection", "coco_panoptic"]: raise ValueError(f"Format {format} not supported") return format # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): if self.format == "coco_detection": image, target = self.prepare_coco_detection(image, target, return_segmentation_masks) return image, target elif self.format == "coco_panoptic": image, target = self.prepare_coco_panoptic(image, target, masks_path) return image, target else: raise ValueError(f"Format {self.format} not supported") # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, segmentations, height, width): try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_detection def prepare_coco_detection(self, image, target, return_segmentation_masks=False): """ Convert the target in COCO format into the format expected by DETR. 
""" w, h = image.size image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # get all COCO annotations for the given image anno = target["annotations"] anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0] boxes = [obj["bbox"] for obj in anno] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h) classes = [obj["category_id"] for obj in anno] classes = np.asarray(classes, dtype=np.int64) if return_segmentation_masks: segmentations = [obj["segmentation"] for obj in anno] masks = self.convert_coco_poly_to_mask(segmentations, h, w) keypoints = None if anno and "keypoints" in anno[0]: keypoints = [obj["keypoints"] for obj in anno] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] if num_keypoints: keypoints = keypoints.reshape((-1, 3)) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) boxes = boxes[keep] classes = classes[keep] if return_segmentation_masks: masks = masks[keep] if keypoints is not None: keypoints = keypoints[keep] target = {} target["boxes"] = boxes target["class_labels"] = classes if return_segmentation_masks: target["masks"] = masks target["image_id"] = image_id if keypoints is not None: target["keypoints"] = keypoints # for conversion to coco api area = np.asarray([obj["area"] for obj in anno], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64) target["area"] = area[keep] target["iscrowd"] = iscrowd[keep] target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_panoptic def prepare_coco_panoptic(self, image, target, masks_path, return_masks=True): w, h = image.size ann_info = target.copy() ann_path = pathlib.Path(masks_path) / ann_info["file_name"] if "segments_info" in ann_info: masks = np.asarray(Image.open(ann_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([ann["id"] for ann in ann_info["segments_info"]]) masks = masks == ids[:, None, None] masks = np.asarray(masks, dtype=np.uint8) labels = np.asarray([ann["category_id"] for ann in ann_info["segments_info"]], dtype=np.int64) target = {} target["image_id"] = np.asarray( [ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]], dtype=np.int64 ) if return_masks: target["masks"] = masks target["class_labels"] = labels target["boxes"] = masks_to_boxes(masks) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) if "segments_info" in ann_info: target["iscrowd"] = np.asarray([ann["iscrowd"] for ann in ann_info["segments_info"]], dtype=np.int64) target["area"] = np.asarray([ann["area"] for ann in ann_info["segments_info"]], dtype=np.float32) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._resize def _resize(self, image, size, target=None, max_size=None): """ Resize the image to the given size. Size can be min_size (scalar) or (w, h) tuple. If size is an int, smaller edge of the image will be matched to this number. If given, also resize the target accordingly. 
""" if not isinstance(image, Image.Image): image = self.to_pil_image(image) def get_size_with_aspect_ratio(image_size, size, max_size=None): w, h = image_size if max_size is not None: min_original_size = float(min((w, h))) max_original_size = float(max((w, h))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (w <= h and w == size) or (h <= w and h == size): return (h, w) if w < h: ow = size oh = int(size * h / w) else: oh = size ow = int(size * w / h) return (oh, ow) def get_size(image_size, size, max_size=None): if isinstance(size, (list, tuple)): return size else: # size returned must be (w, h) since we use PIL to resize images # so we revert the tuple return get_size_with_aspect_ratio(image_size, size, max_size)[::-1] size = get_size(image.size, size, max_size) rescaled_image = self.resize(image, size=size) if target is None: return rescaled_image, None ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) ratio_width, ratio_height = ratios target = target.copy() if "boxes" in target: boxes = target["boxes"] scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) target["boxes"] = scaled_boxes if "area" in target: area = target["area"] scaled_area = area * (ratio_width * ratio_height) target["area"] = scaled_area w, h = size target["size"] = np.asarray([h, w], dtype=np.int64) if "masks" in target: # use PyTorch as current workaround # TODO replace by self.resize masks = torch.from_numpy(target["masks"][:, None]).float() interpolated_masks = nn.functional.interpolate(masks, size=(h, w), mode="nearest")[:, 0] > 0.5 target["masks"] = interpolated_masks.numpy() return rescaled_image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._normalize def _normalize(self, image, mean, std, target=None): """ Normalize the image with a certain mean and std. If given, also normalize the target bounding boxes based on the size of the image. """ image = self.normalize(image, mean=mean, std=std) if target is None: return image, None target = target.copy() h, w = image.shape[-2:] if "boxes" in target: boxes = target["boxes"] boxes = corners_to_center_format(boxes) boxes = boxes / np.asarray([w, h, w, h], dtype=np.float32) target["boxes"] = boxes return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__call__ def __call__( self, images: ImageInput, annotations: Union[List[Dict], List[List[Dict]]] = None, return_segmentation_masks: Optional[bool] = False, masks_path: Optional[pathlib.Path] = None, pad_and_return_pixel_mask: Optional[bool] = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to prepare for the model one or several image(s) and optional annotations. Images are by default padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are real/which are padding. <Tip warning={true}> NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images. </Tip> Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. annotations (`Dict`, `List[Dict]`, *optional*): The corresponding annotations in COCO format. In case [`DetrFeatureExtractor`] was initialized with `format = "coco_detection"`, the annotations for each image should have the following format: {'image_id': int, 'annotations': [annotation]}, with the annotations being a list of COCO object annotations. In case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`, the annotations for each image should have the following format: {'image_id': int, 'file_name': str, 'segments_info': [segment_info]} with segments_info being a list of COCO panoptic annotations. return_segmentation_masks (`Dict`, `List[Dict]`, *optional*, defaults to `False`): Whether to also include instance segmentation masks as part of the labels in case `format = "coco_detection"`. masks_path (`pathlib.Path`, *optional*): Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only relevant in case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`. pad_and_return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). - **labels** -- Optional labels to be fed to a model (when `annotations` are provided) """ # Input type checking for clearer error valid_images = False valid_annotations = False valid_masks_path = False # Check that images has a valid type if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): valid_images = True elif isinstance(images, (list, tuple)): if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): valid_images = True if not valid_images: raise ValueError( "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), " "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." ) is_batched = bool( isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) ) # Check that annotations has a valid type if annotations is not None: if not is_batched: if self.format == "coco_detection": if isinstance(annotations, dict) and "image_id" in annotations and "annotations" in annotations: if isinstance(annotations["annotations"], (list, tuple)): # an image can have no annotations if len(annotations["annotations"]) == 0 or isinstance(annotations["annotations"][0], dict): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations, dict) and "image_id" in annotations and "segments_info" in annotations: if isinstance(annotations["segments_info"], (list, tuple)): # an image can have no segments (?) 
if len(annotations["segments_info"]) == 0 or isinstance( annotations["segments_info"][0], dict ): valid_annotations = True else: if isinstance(annotations, (list, tuple)): if len(images) != len(annotations): raise ValueError("There must be as many annotations as there are images") if isinstance(annotations[0], Dict): if self.format == "coco_detection": if isinstance(annotations[0]["annotations"], (list, tuple)): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations[0]["segments_info"], (list, tuple)): valid_annotations = True if not valid_annotations: raise ValueError( """ Annotations must of type `Dict` (single image) or `List[Dict]` (batch of images). In case of object detection, each dictionary should contain the keys 'image_id' and 'annotations', with the latter being a list of annotations in COCO format. In case of panoptic segmentation, each dictionary should contain the keys 'file_name', 'image_id' and 'segments_info', with the latter being a list of annotations in COCO format. """ ) # Check that masks_path has a valid type if masks_path is not None: if self.format == "coco_panoptic": if isinstance(masks_path, pathlib.Path): valid_masks_path = True if not valid_masks_path: raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" " `pathlib.Path` object." ) if not is_batched: images = [images] if annotations is not None: annotations = [annotations] # Create a copy of the list to avoid editing it in place images = [image for image in images] if annotations is not None: annotations = [annotation for annotation in annotations] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): if not isinstance(image, Image.Image): image = self.to_pil_image(image) image, target = self.prepare(image, target, return_segmentation_masks, masks_path) images[idx] = image annotations[idx] = target # transformations (resizing + normalization) if self.do_resize and self.size is not None: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._resize(image=image, target=target, size=self.size, max_size=self.max_size) images[idx] = image annotations[idx] = target else: for idx, image in enumerate(images): images[idx] = self._resize(image=image, target=None, size=self.size, max_size=self.max_size)[0] if self.do_normalize: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._normalize( image=image, mean=self.image_mean, std=self.image_std, target=target ) images[idx] = image annotations[idx] = target else: images = [ self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images ] else: images = [np.array(image) for image in images] if pad_and_return_pixel_mask: # pad images up to largest image in batch and create pixel_mask max_size = self._max_by_axis([list(image.shape) for image in images]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in images: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) images = padded_images # return as BatchFeature data = {} 
data["pixel_values"] = images if pad_and_return_pixel_mask: data["pixel_mask"] = pixel_mask encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: # Convert to TensorType tensor_type = return_tensors if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) if not tensor_type == TensorType.PYTORCH: raise ValueError("Only PyTorch is supported for the moment.") else: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") encoded_inputs["labels"] = [ {k: torch.from_numpy(v) for k, v in target.items()} for target in annotations ] return encoded_inputs # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._max_by_axis def _max_by_axis(self, the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.pad_and_create_pixel_mask def pad_and_create_pixel_mask( self, pixel_values_list: List["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]] = None ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. Args: pixel_values_list (`List[torch.Tensor]`): List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). """ max_size = self._max_by_axis([list(image.shape) for image in pixel_values_list]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in pixel_values_list: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) # return as BatchFeature data = {"pixel_values": padded_images, "pixel_mask": pixel_mask} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs def post_process(self, outputs, target_sizes): """ Converts the output of [`DeformableDetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DeformableDetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`.", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the output of [`DeformableDetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*, defaults to `None`): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for Deformable DETR.""" import pathlib import warnings from typing import Dict, List, Optional, Tuple, Union import numpy as np from PIL import Image from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...image_transforms import center_to_corners_format, corners_to_center_format, rgb_to_id from ...image_utils import ImageFeatureExtractionMixin from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) ImageInput = Union[Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]] # Copied from transformers.models.detr.feature_extraction_detr.masks_to_boxes def masks_to_boxes(masks): """ Compute the bounding boxes around the provided panoptic segmentation masks. The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) class DeformableDetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): r""" Constructs a Deformable DETR feature extractor. Differs only in the postprocessing of object detection compared to DETR. This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. 
max_size (`int`, *optional*, defaults to 1333): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. """ model_input_names = ["pixel_values", "pixel_mask"] # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__init__ def __init__( self, format="coco_detection", do_resize=True, size=800, max_size=1333, do_normalize=True, image_mean=None, image_std=None, **kwargs ): super().__init__(**kwargs) self.format = self._is_valid_format(format) self.do_resize = do_resize self.size = size self.max_size = max_size self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [0.485, 0.456, 0.406] # ImageNet mean self.image_std = image_std if image_std is not None else [0.229, 0.224, 0.225] # ImageNet std # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._is_valid_format def _is_valid_format(self, format): if format not in ["coco_detection", "coco_panoptic"]: raise ValueError(f"Format {format} not supported") return format # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): if self.format == "coco_detection": image, target = self.prepare_coco_detection(image, target, return_segmentation_masks) return image, target elif self.format == "coco_panoptic": image, target = self.prepare_coco_panoptic(image, target, masks_path) return image, target else: raise ValueError(f"Format {self.format} not supported") # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, segmentations, height, width): try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_detection def prepare_coco_detection(self, image, target, return_segmentation_masks=False): """ Convert the target in COCO format into the format expected by DETR. 
""" w, h = image.size image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # get all COCO annotations for the given image anno = target["annotations"] anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0] boxes = [obj["bbox"] for obj in anno] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h) classes = [obj["category_id"] for obj in anno] classes = np.asarray(classes, dtype=np.int64) if return_segmentation_masks: segmentations = [obj["segmentation"] for obj in anno] masks = self.convert_coco_poly_to_mask(segmentations, h, w) keypoints = None if anno and "keypoints" in anno[0]: keypoints = [obj["keypoints"] for obj in anno] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] if num_keypoints: keypoints = keypoints.reshape((-1, 3)) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) boxes = boxes[keep] classes = classes[keep] if return_segmentation_masks: masks = masks[keep] if keypoints is not None: keypoints = keypoints[keep] target = {} target["boxes"] = boxes target["class_labels"] = classes if return_segmentation_masks: target["masks"] = masks target["image_id"] = image_id if keypoints is not None: target["keypoints"] = keypoints # for conversion to coco api area = np.asarray([obj["area"] for obj in anno], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64) target["area"] = area[keep] target["iscrowd"] = iscrowd[keep] target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_panoptic def prepare_coco_panoptic(self, image, target, masks_path, return_masks=True): w, h = image.size ann_info = target.copy() ann_path = pathlib.Path(masks_path) / ann_info["file_name"] if "segments_info" in ann_info: masks = np.asarray(Image.open(ann_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([ann["id"] for ann in ann_info["segments_info"]]) masks = masks == ids[:, None, None] masks = np.asarray(masks, dtype=np.uint8) labels = np.asarray([ann["category_id"] for ann in ann_info["segments_info"]], dtype=np.int64) target = {} target["image_id"] = np.asarray( [ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]], dtype=np.int64 ) if return_masks: target["masks"] = masks target["class_labels"] = labels target["boxes"] = masks_to_boxes(masks) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) if "segments_info" in ann_info: target["iscrowd"] = np.asarray([ann["iscrowd"] for ann in ann_info["segments_info"]], dtype=np.int64) target["area"] = np.asarray([ann["area"] for ann in ann_info["segments_info"]], dtype=np.float32) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._resize def _resize(self, image, size, target=None, max_size=None): """ Resize the image to the given size. Size can be min_size (scalar) or (w, h) tuple. If size is an int, smaller edge of the image will be matched to this number. If given, also resize the target accordingly. 
""" if not isinstance(image, Image.Image): image = self.to_pil_image(image) def get_size_with_aspect_ratio(image_size, size, max_size=None): w, h = image_size if max_size is not None: min_original_size = float(min((w, h))) max_original_size = float(max((w, h))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (w <= h and w == size) or (h <= w and h == size): return (h, w) if w < h: ow = size oh = int(size * h / w) else: oh = size ow = int(size * w / h) return (oh, ow) def get_size(image_size, size, max_size=None): if isinstance(size, (list, tuple)): return size else: # size returned must be (w, h) since we use PIL to resize images # so we revert the tuple return get_size_with_aspect_ratio(image_size, size, max_size)[::-1] size = get_size(image.size, size, max_size) rescaled_image = self.resize(image, size=size) if target is None: return rescaled_image, None ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) ratio_width, ratio_height = ratios target = target.copy() if "boxes" in target: boxes = target["boxes"] scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) target["boxes"] = scaled_boxes if "area" in target: area = target["area"] scaled_area = area * (ratio_width * ratio_height) target["area"] = scaled_area w, h = size target["size"] = np.asarray([h, w], dtype=np.int64) if "masks" in target: # use PyTorch as current workaround # TODO replace by self.resize masks = torch.from_numpy(target["masks"][:, None]).float() interpolated_masks = nn.functional.interpolate(masks, size=(h, w), mode="nearest")[:, 0] > 0.5 target["masks"] = interpolated_masks.numpy() return rescaled_image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._normalize def _normalize(self, image, mean, std, target=None): """ Normalize the image with a certain mean and std. If given, also normalize the target bounding boxes based on the size of the image. """ image = self.normalize(image, mean=mean, std=std) if target is None: return image, None target = target.copy() h, w = image.shape[-2:] if "boxes" in target: boxes = target["boxes"] boxes = corners_to_center_format(boxes) boxes = boxes / np.asarray([w, h, w, h], dtype=np.float32) target["boxes"] = boxes return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__call__ def __call__( self, images: ImageInput, annotations: Union[List[Dict], List[List[Dict]]] = None, return_segmentation_masks: Optional[bool] = False, masks_path: Optional[pathlib.Path] = None, pad_and_return_pixel_mask: Optional[bool] = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to prepare for the model one or several image(s) and optional annotations. Images are by default padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are real/which are padding. <Tip warning={true}> NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images. </Tip> Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. annotations (`Dict`, `List[Dict]`, *optional*): The corresponding annotations in COCO format. In case [`DetrFeatureExtractor`] was initialized with `format = "coco_detection"`, the annotations for each image should have the following format: {'image_id': int, 'annotations': [annotation]}, with the annotations being a list of COCO object annotations. In case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`, the annotations for each image should have the following format: {'image_id': int, 'file_name': str, 'segments_info': [segment_info]} with segments_info being a list of COCO panoptic annotations. return_segmentation_masks (`Dict`, `List[Dict]`, *optional*, defaults to `False`): Whether to also include instance segmentation masks as part of the labels in case `format = "coco_detection"`. masks_path (`pathlib.Path`, *optional*): Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only relevant in case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`. pad_and_return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). - **labels** -- Optional labels to be fed to a model (when `annotations` are provided) """ # Input type checking for clearer error valid_images = False valid_annotations = False valid_masks_path = False # Check that images has a valid type if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): valid_images = True elif isinstance(images, (list, tuple)): if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): valid_images = True if not valid_images: raise ValueError( "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), " "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." ) is_batched = bool( isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) ) # Check that annotations has a valid type if annotations is not None: if not is_batched: if self.format == "coco_detection": if isinstance(annotations, dict) and "image_id" in annotations and "annotations" in annotations: if isinstance(annotations["annotations"], (list, tuple)): # an image can have no annotations if len(annotations["annotations"]) == 0 or isinstance(annotations["annotations"][0], dict): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations, dict) and "image_id" in annotations and "segments_info" in annotations: if isinstance(annotations["segments_info"], (list, tuple)): # an image can have no segments (?) 
if len(annotations["segments_info"]) == 0 or isinstance( annotations["segments_info"][0], dict ): valid_annotations = True else: if isinstance(annotations, (list, tuple)): if len(images) != len(annotations): raise ValueError("There must be as many annotations as there are images") if isinstance(annotations[0], Dict): if self.format == "coco_detection": if isinstance(annotations[0]["annotations"], (list, tuple)): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations[0]["segments_info"], (list, tuple)): valid_annotations = True if not valid_annotations: raise ValueError( """ Annotations must of type `Dict` (single image) or `List[Dict]` (batch of images). In case of object detection, each dictionary should contain the keys 'image_id' and 'annotations', with the latter being a list of annotations in COCO format. In case of panoptic segmentation, each dictionary should contain the keys 'file_name', 'image_id' and 'segments_info', with the latter being a list of annotations in COCO format. """ ) # Check that masks_path has a valid type if masks_path is not None: if self.format == "coco_panoptic": if isinstance(masks_path, pathlib.Path): valid_masks_path = True if not valid_masks_path: raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" " `pathlib.Path` object." ) if not is_batched: images = [images] if annotations is not None: annotations = [annotations] # Create a copy of the list to avoid editing it in place images = [image for image in images] if annotations is not None: annotations = [annotation for annotation in annotations] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): if not isinstance(image, Image.Image): image = self.to_pil_image(image) image, target = self.prepare(image, target, return_segmentation_masks, masks_path) images[idx] = image annotations[idx] = target # transformations (resizing + normalization) if self.do_resize and self.size is not None: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._resize(image=image, target=target, size=self.size, max_size=self.max_size) images[idx] = image annotations[idx] = target else: for idx, image in enumerate(images): images[idx] = self._resize(image=image, target=None, size=self.size, max_size=self.max_size)[0] if self.do_normalize: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._normalize( image=image, mean=self.image_mean, std=self.image_std, target=target ) images[idx] = image annotations[idx] = target else: images = [ self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images ] else: images = [np.array(image) for image in images] if pad_and_return_pixel_mask: # pad images up to largest image in batch and create pixel_mask max_size = self._max_by_axis([list(image.shape) for image in images]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in images: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) images = padded_images # return as BatchFeature data = {} 
data["pixel_values"] = images if pad_and_return_pixel_mask: data["pixel_mask"] = pixel_mask encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: # Convert to TensorType tensor_type = return_tensors if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) if not tensor_type == TensorType.PYTORCH: raise ValueError("Only PyTorch is supported for the moment.") else: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") encoded_inputs["labels"] = [ {k: torch.from_numpy(v) for k, v in target.items()} for target in annotations ] return encoded_inputs # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._max_by_axis def _max_by_axis(self, the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.pad_and_create_pixel_mask def pad_and_create_pixel_mask( self, pixel_values_list: List["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]] = None ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. Args: pixel_values_list (`List[torch.Tensor]`): List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). """ max_size = self._max_by_axis([list(image.shape) for image in pixel_values_list]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in pixel_values_list: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) # return as BatchFeature data = {"pixel_values": padded_images, "pixel_mask": pixel_mask} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs def post_process(self, outputs, target_sizes): """ Converts the output of [`DeformableDetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DeformableDetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`.", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the output of [`DeformableDetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*, defaults to `None`): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results
1
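For context, a hedged usage sketch of the `post_process_object_detection` API defined in the Deformable DETR feature extractor above; the checkpoint name and image path are illustrative assumptions, not taken from this record:

```
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, DeformableDetrForObjectDetection

# Checkpoint and image are placeholders for illustration.
checkpoint = "SenseTime/deformable-detr"
feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
model = DeformableDetrForObjectDetection.from_pretrained(checkpoint)

image = Image.open("cat.jpg")
inputs = feature_extractor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Passing target_sizes as a list of (height, width) tuples exercises the
# code path where the scaling tensors are built on CPU.
results = feature_extractor.post_process_object_detection(
    outputs, threshold=0.5, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```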
huggingface/transformers
20,304
fix device issue
# What does this PR do?

When this block is run

```
if isinstance(target_sizes, List):
    img_h = torch.Tensor([i[0] for i in target_sizes])
    img_w = torch.Tensor([i[1] for i in target_sizes])
```

`scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
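A minimal sketch of the resulting device handling, assuming `boxes` already lives on the model's device; the helper name `scale_boxes_to_image` and the standalone setup are illustrative, mirroring the `.to(boxes.device)` call visible in the Deformable DETR post-processing above rather than reproducing the PR's actual diff:

```
import torch


def scale_boxes_to_image(boxes, target_sizes):
    # Illustrative helper (not the actual diff): convert relative [0, 1] boxes
    # to absolute pixel coordinates for each image in the batch.
    if isinstance(target_sizes, (list, tuple)):
        # torch.Tensor(...) always allocates on CPU, which is why scale_fct
        # would otherwise end up on the wrong device when the model runs on GPU.
        img_h = torch.Tensor([size[0] for size in target_sizes])
        img_w = torch.Tensor([size[1] for size in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)
    # Move the scaling factors onto the same device as the predicted boxes.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
    return boxes * scale_fct[:, None, :]
```

Calling `.to(boxes.device)` on the stacked tensor keeps the list-of-tuples code path unchanged and only moves four floats per image, so the extra transfer is negligible.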
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/detr/feature_extraction_detr.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for DETR.""" import io import pathlib import warnings from collections import defaultdict from typing import Dict, List, Optional, Set, Tuple, Union import numpy as np from PIL import Image from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...image_transforms import center_to_corners_format, corners_to_center_format, id_to_rgb, rgb_to_id from ...image_utils import ImageFeatureExtractionMixin from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) ImageInput = Union[Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]] def masks_to_boxes(masks): """ Compute the bounding boxes around the provided panoptic segmentation masks. The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) def binary_mask_to_rle(mask): """ Args: Converts given binary mask of shape (height, width) to the run-length encoding (RLE) format. mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return [x for x in runs] def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape (height, width) to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. 
""" segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. """ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments class DetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): r""" Constructs a DETR 
feature extractor. This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to 1333): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. """ model_input_names = ["pixel_values", "pixel_mask"] def __init__( self, format="coco_detection", do_resize=True, size=800, max_size=1333, do_normalize=True, image_mean=None, image_std=None, **kwargs ): super().__init__(**kwargs) self.format = self._is_valid_format(format) self.do_resize = do_resize self.size = size self.max_size = max_size self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [0.485, 0.456, 0.406] # ImageNet mean self.image_std = image_std if image_std is not None else [0.229, 0.224, 0.225] # ImageNet std def _is_valid_format(self, format): if format not in ["coco_detection", "coco_panoptic"]: raise ValueError(f"Format {format} not supported") return format def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): if self.format == "coco_detection": image, target = self.prepare_coco_detection(image, target, return_segmentation_masks) return image, target elif self.format == "coco_panoptic": image, target = self.prepare_coco_panoptic(image, target, masks_path) return image, target else: raise ValueError(f"Format {self.format} not supported") # inspired by https://github.com/facebookresearch/detr/blob/master/datasets/coco.py#L33 def convert_coco_poly_to_mask(self, segmentations, height, width): try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # inspired by https://github.com/facebookresearch/detr/blob/master/datasets/coco.py#L50 def prepare_coco_detection(self, image, target, return_segmentation_masks=False): """ Convert the target in COCO format into the format expected by DETR. 
""" w, h = image.size image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # get all COCO annotations for the given image anno = target["annotations"] anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0] boxes = [obj["bbox"] for obj in anno] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h) classes = [obj["category_id"] for obj in anno] classes = np.asarray(classes, dtype=np.int64) if return_segmentation_masks: segmentations = [obj["segmentation"] for obj in anno] masks = self.convert_coco_poly_to_mask(segmentations, h, w) keypoints = None if anno and "keypoints" in anno[0]: keypoints = [obj["keypoints"] for obj in anno] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] if num_keypoints: keypoints = keypoints.reshape((-1, 3)) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) boxes = boxes[keep] classes = classes[keep] if return_segmentation_masks: masks = masks[keep] if keypoints is not None: keypoints = keypoints[keep] target = {} target["boxes"] = boxes target["class_labels"] = classes if return_segmentation_masks: target["masks"] = masks target["image_id"] = image_id if keypoints is not None: target["keypoints"] = keypoints # for conversion to coco api area = np.asarray([obj["area"] for obj in anno], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64) target["area"] = area[keep] target["iscrowd"] = iscrowd[keep] target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) return image, target def prepare_coco_panoptic(self, image, target, masks_path, return_masks=True): w, h = image.size ann_info = target.copy() ann_path = pathlib.Path(masks_path) / ann_info["file_name"] if "segments_info" in ann_info: masks = np.asarray(Image.open(ann_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([ann["id"] for ann in ann_info["segments_info"]]) masks = masks == ids[:, None, None] masks = np.asarray(masks, dtype=np.uint8) labels = np.asarray([ann["category_id"] for ann in ann_info["segments_info"]], dtype=np.int64) target = {} target["image_id"] = np.asarray( [ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]], dtype=np.int64 ) if return_masks: target["masks"] = masks target["class_labels"] = labels target["boxes"] = masks_to_boxes(masks) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) if "segments_info" in ann_info: target["iscrowd"] = np.asarray([ann["iscrowd"] for ann in ann_info["segments_info"]], dtype=np.int64) target["area"] = np.asarray([ann["area"] for ann in ann_info["segments_info"]], dtype=np.float32) return image, target def _resize(self, image, size, target=None, max_size=None): """ Resize the image to the given size. Size can be min_size (scalar) or (w, h) tuple. If size is an int, smaller edge of the image will be matched to this number. If given, also resize the target accordingly. 
""" if not isinstance(image, Image.Image): image = self.to_pil_image(image) def get_size_with_aspect_ratio(image_size, size, max_size=None): w, h = image_size if max_size is not None: min_original_size = float(min((w, h))) max_original_size = float(max((w, h))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (w <= h and w == size) or (h <= w and h == size): return (h, w) if w < h: ow = size oh = int(size * h / w) else: oh = size ow = int(size * w / h) return (oh, ow) def get_size(image_size, size, max_size=None): if isinstance(size, (list, tuple)): return size else: # size returned must be (w, h) since we use PIL to resize images # so we revert the tuple return get_size_with_aspect_ratio(image_size, size, max_size)[::-1] size = get_size(image.size, size, max_size) rescaled_image = self.resize(image, size=size) if target is None: return rescaled_image, None ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) ratio_width, ratio_height = ratios target = target.copy() if "boxes" in target: boxes = target["boxes"] scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) target["boxes"] = scaled_boxes if "area" in target: area = target["area"] scaled_area = area * (ratio_width * ratio_height) target["area"] = scaled_area w, h = size target["size"] = np.asarray([h, w], dtype=np.int64) if "masks" in target: # use PyTorch as current workaround # TODO replace by self.resize masks = torch.from_numpy(target["masks"][:, None]).float() interpolated_masks = nn.functional.interpolate(masks, size=(h, w), mode="nearest")[:, 0] > 0.5 target["masks"] = interpolated_masks.numpy() return rescaled_image, target def _normalize(self, image, mean, std, target=None): """ Normalize the image with a certain mean and std. If given, also normalize the target bounding boxes based on the size of the image. """ image = self.normalize(image, mean=mean, std=std) if target is None: return image, None target = target.copy() h, w = image.shape[-2:] if "boxes" in target: boxes = target["boxes"] boxes = corners_to_center_format(boxes) boxes = boxes / np.asarray([w, h, w, h], dtype=np.float32) target["boxes"] = boxes return image, target def __call__( self, images: ImageInput, annotations: Union[List[Dict], List[List[Dict]]] = None, return_segmentation_masks: Optional[bool] = False, masks_path: Optional[pathlib.Path] = None, pad_and_return_pixel_mask: Optional[bool] = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to prepare for the model one or several image(s) and optional annotations. Images are by default padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are real/which are padding. <Tip warning={true}> NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images. </Tip> Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. annotations (`Dict`, `List[Dict]`, *optional*): The corresponding annotations in COCO format. 
In case [`DetrFeatureExtractor`] was initialized with `format = "coco_detection"`, the annotations for each image should have the following format: {'image_id': int, 'annotations': [annotation]}, with the annotations being a list of COCO object annotations. In case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`, the annotations for each image should have the following format: {'image_id': int, 'file_name': str, 'segments_info': [segment_info]} with segments_info being a list of COCO panoptic annotations. return_segmentation_masks (`Dict`, `List[Dict]`, *optional*, defaults to `False`): Whether to also include instance segmentation masks as part of the labels in case `format = "coco_detection"`. masks_path (`pathlib.Path`, *optional*): Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only relevant in case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`. pad_and_return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). - **labels** -- Optional labels to be fed to a model (when `annotations` are provided) """ # Input type checking for clearer error valid_images = False valid_annotations = False valid_masks_path = False # Check that images has a valid type if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): valid_images = True elif isinstance(images, (list, tuple)): if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): valid_images = True if not valid_images: raise ValueError( "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), " "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." ) is_batched = bool( isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) ) # Check that annotations has a valid type if annotations is not None: if not is_batched: if self.format == "coco_detection": if isinstance(annotations, dict) and "image_id" in annotations and "annotations" in annotations: if isinstance(annotations["annotations"], (list, tuple)): # an image can have no annotations if len(annotations["annotations"]) == 0 or isinstance(annotations["annotations"][0], dict): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations, dict) and "image_id" in annotations and "segments_info" in annotations: if isinstance(annotations["segments_info"], (list, tuple)): # an image can have no segments (?) 
if len(annotations["segments_info"]) == 0 or isinstance( annotations["segments_info"][0], dict ): valid_annotations = True else: if isinstance(annotations, (list, tuple)): if len(images) != len(annotations): raise ValueError("There must be as many annotations as there are images") if isinstance(annotations[0], Dict): if self.format == "coco_detection": if isinstance(annotations[0]["annotations"], (list, tuple)): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations[0]["segments_info"], (list, tuple)): valid_annotations = True if not valid_annotations: raise ValueError( """ Annotations must of type `Dict` (single image) or `List[Dict]` (batch of images). In case of object detection, each dictionary should contain the keys 'image_id' and 'annotations', with the latter being a list of annotations in COCO format. In case of panoptic segmentation, each dictionary should contain the keys 'file_name', 'image_id' and 'segments_info', with the latter being a list of annotations in COCO format. """ ) # Check that masks_path has a valid type if masks_path is not None: if self.format == "coco_panoptic": if isinstance(masks_path, pathlib.Path): valid_masks_path = True if not valid_masks_path: raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" " `pathlib.Path` object." ) if not is_batched: images = [images] if annotations is not None: annotations = [annotations] # Create a copy of the list to avoid editing it in place images = [image for image in images] if annotations is not None: annotations = [annotation for annotation in annotations] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): if not isinstance(image, Image.Image): image = self.to_pil_image(image) image, target = self.prepare(image, target, return_segmentation_masks, masks_path) images[idx] = image annotations[idx] = target # transformations (resizing + normalization) if self.do_resize and self.size is not None: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._resize(image=image, target=target, size=self.size, max_size=self.max_size) images[idx] = image annotations[idx] = target else: for idx, image in enumerate(images): images[idx] = self._resize(image=image, target=None, size=self.size, max_size=self.max_size)[0] if self.do_normalize: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._normalize( image=image, mean=self.image_mean, std=self.image_std, target=target ) images[idx] = image annotations[idx] = target else: images = [ self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images ] else: images = [np.array(image) for image in images] if pad_and_return_pixel_mask: # pad images up to largest image in batch and create pixel_mask max_size = self._max_by_axis([list(image.shape) for image in images]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in images: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) images = padded_images # return as BatchFeature data = {} 
data["pixel_values"] = images if pad_and_return_pixel_mask: data["pixel_mask"] = pixel_mask encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: # Convert to TensorType tensor_type = return_tensors if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) if not tensor_type == TensorType.PYTORCH: raise ValueError("Only PyTorch is supported for the moment.") else: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") encoded_inputs["labels"] = [ {k: torch.from_numpy(v) for k, v in target.items()} for target in annotations ] return encoded_inputs def _max_by_axis(self, the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes def pad_and_create_pixel_mask( self, pixel_values_list: List["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]] = None ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. Args: pixel_values_list (`List[torch.Tensor]`): List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). """ max_size = self._max_by_axis([list(image.shape) for image in pixel_values_list]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in pixel_values_list: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) # return as BatchFeature data = {"pixel_values": padded_images, "pixel_mask": pixel_mask} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs # POSTPROCESSING METHODS # inspired by https://github.com/facebookresearch/detr/blob/master/models/detr.py#L258 def post_process(self, outputs, target_sizes): """ Converts the output of [`DetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = nn.functional.softmax(out_logits, -1) scores, labels = prob[..., :-1].max(-1) # convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(out_bbox) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): """ Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch. Args: outputs ([`DetrSegmentationOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. threshold (`float`, *optional*, defaults to 0.9): Threshold to use to filter out queries. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model. """ warnings.warn( "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_semantic_segmentation`.", FutureWarning, ) out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = [] def to_tuple(tup): if isinstance(tup, tuple): return tup return tuple(tup.cpu().tolist()) for cur_logits, cur_masks, size in zip(out_logits, raw_masks, target_sizes): # we filter empty queries and detection below threshold scores, labels = cur_logits.softmax(-1).max(-1) keep = labels.ne(outputs.logits.shape[-1] - 1) & (scores > threshold) cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) cur_scores = cur_scores[keep] cur_classes = cur_classes[keep] cur_masks = cur_masks[keep] cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) cur_masks = (cur_masks.sigmoid() > mask_threshold) * 1 predictions = {"scores": cur_scores, "labels": cur_classes, "masks": cur_masks} preds.append(predictions) return preds # inspired by https://github.com/facebookresearch/detr/blob/master/models/segmentation.py#L218 def post_process_instance(self, results, outputs, orig_target_sizes, max_target_sizes, threshold=0.5): """ Converts the output of [`DetrForSegmentation`] into actual instance segmentation predictions. Only supports PyTorch. Args: results (`List[Dict]`): Results list obtained by [`~DetrFeatureExtractor.post_process`], to which "masks" results will be added. outputs ([`DetrSegmentationOutput`]): Raw outputs of the model. orig_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). 
max_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the maximum size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, boxes and masks for an image in the batch as predicted by the model. """ warnings.warn( "`post_process_instance` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_instance_segmentation`.", FutureWarning, ) if len(orig_target_sizes) != len(max_target_sizes): raise ValueError("Make sure to pass in as many orig_target_sizes as max_target_sizes") max_h, max_w = max_target_sizes.max(0)[0].tolist() outputs_masks = outputs.pred_masks.squeeze(2) outputs_masks = nn.functional.interpolate( outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False ) outputs_masks = (outputs_masks.sigmoid() > threshold).cpu() for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)): img_h, img_w = t[0], t[1] results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1) results[i]["masks"] = nn.functional.interpolate( results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest" ).byte() return results # inspired by https://github.com/facebookresearch/detr/blob/master/models/segmentation.py#L241 def post_process_panoptic(self, outputs, processed_sizes, target_sizes=None, is_thing_map=None, threshold=0.85): """ Converts the output of [`DetrForSegmentation`] into actual panoptic predictions. Only supports PyTorch. Args: outputs ([`DetrSegmentationOutput`]): Raw outputs of the model. processed_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): Torch Tensor (or list) containing the size (h, w) of each image of the batch, i.e. the size after data augmentation but before batching. target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`, *optional*): Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. If left to None, it will default to the `processed_sizes`. is_thing_map (`torch.Tensor` of shape `(batch_size, 2)`, *optional*): Dictionary mapping class indices to either True or False, depending on whether or not they are a thing. If not set, defaults to the `is_thing_map` of COCO panoptic. threshold (`float`, *optional*, defaults to 0.85): Threshold to use to filter out queries. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing a PNG string and segments_info values for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process_panoptic is deprecated and will be removed in v5 of Transformers, please use" " `post_process_panoptic_segmentation`.", FutureWarning, ) if target_sizes is None: target_sizes = processed_sizes if len(processed_sizes) != len(target_sizes): raise ValueError("Make sure to pass in as many processed_sizes as target_sizes") if is_thing_map is None: # default to is_thing_map of COCO panoptic is_thing_map = {i: i <= 90 for i in range(201)} out_logits, raw_masks, raw_boxes = outputs.logits, outputs.pred_masks, outputs.pred_boxes if not len(out_logits) == len(raw_masks) == len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits and masks" ) preds = [] def to_tuple(tup): if isinstance(tup, tuple): return tup return tuple(tup.cpu().tolist()) for cur_logits, cur_masks, cur_boxes, size, target_size in zip( out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes ): # we filter empty queries and detection below threshold scores, labels = cur_logits.softmax(-1).max(-1) keep = labels.ne(outputs.logits.shape[-1] - 1) & (scores > threshold) cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) cur_scores = cur_scores[keep] cur_classes = cur_classes[keep] cur_masks = cur_masks[keep] cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) cur_boxes = center_to_corners_format(cur_boxes[keep]) h, w = cur_masks.shape[-2:] if len(cur_boxes) != len(cur_classes): raise ValueError("Not as many boxes as there are classes") # It may be that we have several predicted masks for the same stuff class. # In the following, we track the list of masks ids for each stuff class (they are merged later on) cur_masks = cur_masks.flatten(1) stuff_equiv_classes = defaultdict(lambda: []) for k, label in enumerate(cur_classes): if not is_thing_map[label.item()]: stuff_equiv_classes[label.item()].append(k) def get_ids_area(masks, scores, dedup=False): # This helper function creates the final panoptic segmentation image # It also returns the area of the masks that appears on the image m_id = masks.transpose(0, 1).softmax(-1) if m_id.shape[-1] == 0: # We didn't detect any mask :( m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) else: m_id = m_id.argmax(-1).view(h, w) if dedup: # Merge the masks corresponding to the same stuff class for equiv in stuff_equiv_classes.values(): if len(equiv) > 1: for eq_id in equiv: m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) final_h, final_w = to_tuple(target_size) seg_img = Image.fromarray(id_to_rgb(m_id.view(h, w).cpu().numpy())) seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST) np_seg_img = torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())) np_seg_img = np_seg_img.view(final_h, final_w, 3) np_seg_img = np_seg_img.numpy() m_id = torch.from_numpy(rgb_to_id(np_seg_img)) area = [] for i in range(len(scores)): area.append(m_id.eq(i).sum().item()) return area, seg_img area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True) if cur_classes.numel() > 0: # We know filter empty masks as long as we find some while True: filtered_small = torch.as_tensor( [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device ) if filtered_small.any().item(): cur_scores = cur_scores[~filtered_small] cur_classes = cur_classes[~filtered_small] cur_masks = cur_masks[~filtered_small] area, seg_img = get_ids_area(cur_masks, cur_scores) else: break else: cur_classes = torch.ones(1, 
dtype=torch.long, device=cur_classes.device) segments_info = [] for i, a in enumerate(area): cat = cur_classes[i].item() segments_info.append({"id": i, "isthing": is_thing_map[cat], "category_id": cat, "area": a}) del cur_classes with io.BytesIO() as out: seg_img.save(out, format="PNG") predictions = {"png_string": out.getvalue(), "segments_info": segments_info} preds.append(predictions) return preds def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the output of [`DetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*, defaults to `None`): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = nn.functional.softmax(out_logits, -1) scores, labels = prob[..., :-1].max(-1) # Convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(out_bbox) # Convert from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None): """ Converts the output of [`DetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`DetrForSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*, defaults to `None`): A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`DetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`DetrForSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional*): Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=[], target_size=target_size, ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({"segmentation": segmentation, "segments_info": segments}) return results def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`DetrForSegmentation`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`DetrForSegmentation`]): The outputs from [`DetrForSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. 
- **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: warnings.warn("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for DETR.""" import io import pathlib import warnings from collections import defaultdict from typing import Dict, List, Optional, Set, Tuple, Union import numpy as np from PIL import Image from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...image_transforms import center_to_corners_format, corners_to_center_format, id_to_rgb, rgb_to_id from ...image_utils import ImageFeatureExtractionMixin from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) ImageInput = Union[Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]] def masks_to_boxes(masks): """ Compute the bounding boxes around the provided panoptic segmentation masks. The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) def binary_mask_to_rle(mask): """ Args: Converts given binary mask of shape (height, width) to the run-length encoding (RLE) format. mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return [x for x in runs] def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape (height, width) to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. 
""" segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. """ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments class DetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): r""" Constructs a DETR 
feature extractor. This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to 1333): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. """ model_input_names = ["pixel_values", "pixel_mask"] def __init__( self, format="coco_detection", do_resize=True, size=800, max_size=1333, do_normalize=True, image_mean=None, image_std=None, **kwargs ): super().__init__(**kwargs) self.format = self._is_valid_format(format) self.do_resize = do_resize self.size = size self.max_size = max_size self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [0.485, 0.456, 0.406] # ImageNet mean self.image_std = image_std if image_std is not None else [0.229, 0.224, 0.225] # ImageNet std def _is_valid_format(self, format): if format not in ["coco_detection", "coco_panoptic"]: raise ValueError(f"Format {format} not supported") return format def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): if self.format == "coco_detection": image, target = self.prepare_coco_detection(image, target, return_segmentation_masks) return image, target elif self.format == "coco_panoptic": image, target = self.prepare_coco_panoptic(image, target, masks_path) return image, target else: raise ValueError(f"Format {self.format} not supported") # inspired by https://github.com/facebookresearch/detr/blob/master/datasets/coco.py#L33 def convert_coco_poly_to_mask(self, segmentations, height, width): try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # inspired by https://github.com/facebookresearch/detr/blob/master/datasets/coco.py#L50 def prepare_coco_detection(self, image, target, return_segmentation_masks=False): """ Convert the target in COCO format into the format expected by DETR. 
""" w, h = image.size image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # get all COCO annotations for the given image anno = target["annotations"] anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0] boxes = [obj["bbox"] for obj in anno] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h) classes = [obj["category_id"] for obj in anno] classes = np.asarray(classes, dtype=np.int64) if return_segmentation_masks: segmentations = [obj["segmentation"] for obj in anno] masks = self.convert_coco_poly_to_mask(segmentations, h, w) keypoints = None if anno and "keypoints" in anno[0]: keypoints = [obj["keypoints"] for obj in anno] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] if num_keypoints: keypoints = keypoints.reshape((-1, 3)) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) boxes = boxes[keep] classes = classes[keep] if return_segmentation_masks: masks = masks[keep] if keypoints is not None: keypoints = keypoints[keep] target = {} target["boxes"] = boxes target["class_labels"] = classes if return_segmentation_masks: target["masks"] = masks target["image_id"] = image_id if keypoints is not None: target["keypoints"] = keypoints # for conversion to coco api area = np.asarray([obj["area"] for obj in anno], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64) target["area"] = area[keep] target["iscrowd"] = iscrowd[keep] target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) return image, target def prepare_coco_panoptic(self, image, target, masks_path, return_masks=True): w, h = image.size ann_info = target.copy() ann_path = pathlib.Path(masks_path) / ann_info["file_name"] if "segments_info" in ann_info: masks = np.asarray(Image.open(ann_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([ann["id"] for ann in ann_info["segments_info"]]) masks = masks == ids[:, None, None] masks = np.asarray(masks, dtype=np.uint8) labels = np.asarray([ann["category_id"] for ann in ann_info["segments_info"]], dtype=np.int64) target = {} target["image_id"] = np.asarray( [ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]], dtype=np.int64 ) if return_masks: target["masks"] = masks target["class_labels"] = labels target["boxes"] = masks_to_boxes(masks) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) if "segments_info" in ann_info: target["iscrowd"] = np.asarray([ann["iscrowd"] for ann in ann_info["segments_info"]], dtype=np.int64) target["area"] = np.asarray([ann["area"] for ann in ann_info["segments_info"]], dtype=np.float32) return image, target def _resize(self, image, size, target=None, max_size=None): """ Resize the image to the given size. Size can be min_size (scalar) or (w, h) tuple. If size is an int, smaller edge of the image will be matched to this number. If given, also resize the target accordingly. 
""" if not isinstance(image, Image.Image): image = self.to_pil_image(image) def get_size_with_aspect_ratio(image_size, size, max_size=None): w, h = image_size if max_size is not None: min_original_size = float(min((w, h))) max_original_size = float(max((w, h))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (w <= h and w == size) or (h <= w and h == size): return (h, w) if w < h: ow = size oh = int(size * h / w) else: oh = size ow = int(size * w / h) return (oh, ow) def get_size(image_size, size, max_size=None): if isinstance(size, (list, tuple)): return size else: # size returned must be (w, h) since we use PIL to resize images # so we revert the tuple return get_size_with_aspect_ratio(image_size, size, max_size)[::-1] size = get_size(image.size, size, max_size) rescaled_image = self.resize(image, size=size) if target is None: return rescaled_image, None ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) ratio_width, ratio_height = ratios target = target.copy() if "boxes" in target: boxes = target["boxes"] scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) target["boxes"] = scaled_boxes if "area" in target: area = target["area"] scaled_area = area * (ratio_width * ratio_height) target["area"] = scaled_area w, h = size target["size"] = np.asarray([h, w], dtype=np.int64) if "masks" in target: # use PyTorch as current workaround # TODO replace by self.resize masks = torch.from_numpy(target["masks"][:, None]).float() interpolated_masks = nn.functional.interpolate(masks, size=(h, w), mode="nearest")[:, 0] > 0.5 target["masks"] = interpolated_masks.numpy() return rescaled_image, target def _normalize(self, image, mean, std, target=None): """ Normalize the image with a certain mean and std. If given, also normalize the target bounding boxes based on the size of the image. """ image = self.normalize(image, mean=mean, std=std) if target is None: return image, None target = target.copy() h, w = image.shape[-2:] if "boxes" in target: boxes = target["boxes"] boxes = corners_to_center_format(boxes) boxes = boxes / np.asarray([w, h, w, h], dtype=np.float32) target["boxes"] = boxes return image, target def __call__( self, images: ImageInput, annotations: Union[List[Dict], List[List[Dict]]] = None, return_segmentation_masks: Optional[bool] = False, masks_path: Optional[pathlib.Path] = None, pad_and_return_pixel_mask: Optional[bool] = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to prepare for the model one or several image(s) and optional annotations. Images are by default padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are real/which are padding. <Tip warning={true}> NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images. </Tip> Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. annotations (`Dict`, `List[Dict]`, *optional*): The corresponding annotations in COCO format. 
In case [`DetrFeatureExtractor`] was initialized with `format = "coco_detection"`, the annotations for each image should have the following format: {'image_id': int, 'annotations': [annotation]}, with the annotations being a list of COCO object annotations. In case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`, the annotations for each image should have the following format: {'image_id': int, 'file_name': str, 'segments_info': [segment_info]} with segments_info being a list of COCO panoptic annotations. return_segmentation_masks (`Dict`, `List[Dict]`, *optional*, defaults to `False`): Whether to also include instance segmentation masks as part of the labels in case `format = "coco_detection"`. masks_path (`pathlib.Path`, *optional*): Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only relevant in case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`. pad_and_return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). - **labels** -- Optional labels to be fed to a model (when `annotations` are provided) """ # Input type checking for clearer error valid_images = False valid_annotations = False valid_masks_path = False # Check that images has a valid type if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): valid_images = True elif isinstance(images, (list, tuple)): if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): valid_images = True if not valid_images: raise ValueError( "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), " "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." ) is_batched = bool( isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) ) # Check that annotations has a valid type if annotations is not None: if not is_batched: if self.format == "coco_detection": if isinstance(annotations, dict) and "image_id" in annotations and "annotations" in annotations: if isinstance(annotations["annotations"], (list, tuple)): # an image can have no annotations if len(annotations["annotations"]) == 0 or isinstance(annotations["annotations"][0], dict): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations, dict) and "image_id" in annotations and "segments_info" in annotations: if isinstance(annotations["segments_info"], (list, tuple)): # an image can have no segments (?) 
if len(annotations["segments_info"]) == 0 or isinstance( annotations["segments_info"][0], dict ): valid_annotations = True else: if isinstance(annotations, (list, tuple)): if len(images) != len(annotations): raise ValueError("There must be as many annotations as there are images") if isinstance(annotations[0], Dict): if self.format == "coco_detection": if isinstance(annotations[0]["annotations"], (list, tuple)): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations[0]["segments_info"], (list, tuple)): valid_annotations = True if not valid_annotations: raise ValueError( """ Annotations must of type `Dict` (single image) or `List[Dict]` (batch of images). In case of object detection, each dictionary should contain the keys 'image_id' and 'annotations', with the latter being a list of annotations in COCO format. In case of panoptic segmentation, each dictionary should contain the keys 'file_name', 'image_id' and 'segments_info', with the latter being a list of annotations in COCO format. """ ) # Check that masks_path has a valid type if masks_path is not None: if self.format == "coco_panoptic": if isinstance(masks_path, pathlib.Path): valid_masks_path = True if not valid_masks_path: raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" " `pathlib.Path` object." ) if not is_batched: images = [images] if annotations is not None: annotations = [annotations] # Create a copy of the list to avoid editing it in place images = [image for image in images] if annotations is not None: annotations = [annotation for annotation in annotations] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): if not isinstance(image, Image.Image): image = self.to_pil_image(image) image, target = self.prepare(image, target, return_segmentation_masks, masks_path) images[idx] = image annotations[idx] = target # transformations (resizing + normalization) if self.do_resize and self.size is not None: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._resize(image=image, target=target, size=self.size, max_size=self.max_size) images[idx] = image annotations[idx] = target else: for idx, image in enumerate(images): images[idx] = self._resize(image=image, target=None, size=self.size, max_size=self.max_size)[0] if self.do_normalize: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._normalize( image=image, mean=self.image_mean, std=self.image_std, target=target ) images[idx] = image annotations[idx] = target else: images = [ self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images ] else: images = [np.array(image) for image in images] if pad_and_return_pixel_mask: # pad images up to largest image in batch and create pixel_mask max_size = self._max_by_axis([list(image.shape) for image in images]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in images: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) images = padded_images # return as BatchFeature data = {} 
data["pixel_values"] = images if pad_and_return_pixel_mask: data["pixel_mask"] = pixel_mask encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: # Convert to TensorType tensor_type = return_tensors if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) if not tensor_type == TensorType.PYTORCH: raise ValueError("Only PyTorch is supported for the moment.") else: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") encoded_inputs["labels"] = [ {k: torch.from_numpy(v) for k, v in target.items()} for target in annotations ] return encoded_inputs def _max_by_axis(self, the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes def pad_and_create_pixel_mask( self, pixel_values_list: List["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]] = None ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. Args: pixel_values_list (`List[torch.Tensor]`): List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if *"pixel_mask"* is in `self.model_input_names`). """ max_size = self._max_by_axis([list(image.shape) for image in pixel_values_list]) c, h, w = max_size padded_images = [] pixel_mask = [] for image in pixel_values_list: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # create pixel mask mask = np.zeros((h, w), dtype=np.int64) mask[: image.shape[1], : image.shape[2]] = True pixel_mask.append(mask) # return as BatchFeature data = {"pixel_values": padded_images, "pixel_mask": pixel_mask} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs # POSTPROCESSING METHODS # inspired by https://github.com/facebookresearch/detr/blob/master/models/detr.py#L258 def post_process(self, outputs, target_sizes): """ Converts the output of [`DetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = nn.functional.softmax(out_logits, -1) scores, labels = prob[..., :-1].max(-1) # convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(out_bbox) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): """ Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch. Args: outputs ([`DetrSegmentationOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. threshold (`float`, *optional*, defaults to 0.9): Threshold to use to filter out queries. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model. """ warnings.warn( "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_semantic_segmentation`.", FutureWarning, ) out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = [] def to_tuple(tup): if isinstance(tup, tuple): return tup return tuple(tup.cpu().tolist()) for cur_logits, cur_masks, size in zip(out_logits, raw_masks, target_sizes): # we filter empty queries and detection below threshold scores, labels = cur_logits.softmax(-1).max(-1) keep = labels.ne(outputs.logits.shape[-1] - 1) & (scores > threshold) cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) cur_scores = cur_scores[keep] cur_classes = cur_classes[keep] cur_masks = cur_masks[keep] cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) cur_masks = (cur_masks.sigmoid() > mask_threshold) * 1 predictions = {"scores": cur_scores, "labels": cur_classes, "masks": cur_masks} preds.append(predictions) return preds # inspired by https://github.com/facebookresearch/detr/blob/master/models/segmentation.py#L218 def post_process_instance(self, results, outputs, orig_target_sizes, max_target_sizes, threshold=0.5): """ Converts the output of [`DetrForSegmentation`] into actual instance segmentation predictions. Only supports PyTorch. Args: results (`List[Dict]`): Results list obtained by [`~DetrFeatureExtractor.post_process`], to which "masks" results will be added. outputs ([`DetrSegmentationOutput`]): Raw outputs of the model. orig_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). 
max_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the maximum size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, boxes and masks for an image in the batch as predicted by the model. """ warnings.warn( "`post_process_instance` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_instance_segmentation`.", FutureWarning, ) if len(orig_target_sizes) != len(max_target_sizes): raise ValueError("Make sure to pass in as many orig_target_sizes as max_target_sizes") max_h, max_w = max_target_sizes.max(0)[0].tolist() outputs_masks = outputs.pred_masks.squeeze(2) outputs_masks = nn.functional.interpolate( outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False ) outputs_masks = (outputs_masks.sigmoid() > threshold).cpu() for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)): img_h, img_w = t[0], t[1] results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1) results[i]["masks"] = nn.functional.interpolate( results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest" ).byte() return results # inspired by https://github.com/facebookresearch/detr/blob/master/models/segmentation.py#L241 def post_process_panoptic(self, outputs, processed_sizes, target_sizes=None, is_thing_map=None, threshold=0.85): """ Converts the output of [`DetrForSegmentation`] into actual panoptic predictions. Only supports PyTorch. Args: outputs ([`DetrSegmentationOutput`]): Raw outputs of the model. processed_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): Torch Tensor (or list) containing the size (h, w) of each image of the batch, i.e. the size after data augmentation but before batching. target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`, *optional*): Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. If left to None, it will default to the `processed_sizes`. is_thing_map (`torch.Tensor` of shape `(batch_size, 2)`, *optional*): Dictionary mapping class indices to either True or False, depending on whether or not they are a thing. If not set, defaults to the `is_thing_map` of COCO panoptic. threshold (`float`, *optional*, defaults to 0.85): Threshold to use to filter out queries. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing a PNG string and segments_info values for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process_panoptic is deprecated and will be removed in v5 of Transformers, please use" " `post_process_panoptic_segmentation`.", FutureWarning, ) if target_sizes is None: target_sizes = processed_sizes if len(processed_sizes) != len(target_sizes): raise ValueError("Make sure to pass in as many processed_sizes as target_sizes") if is_thing_map is None: # default to is_thing_map of COCO panoptic is_thing_map = {i: i <= 90 for i in range(201)} out_logits, raw_masks, raw_boxes = outputs.logits, outputs.pred_masks, outputs.pred_boxes if not len(out_logits) == len(raw_masks) == len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits and masks" ) preds = [] def to_tuple(tup): if isinstance(tup, tuple): return tup return tuple(tup.cpu().tolist()) for cur_logits, cur_masks, cur_boxes, size, target_size in zip( out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes ): # we filter empty queries and detection below threshold scores, labels = cur_logits.softmax(-1).max(-1) keep = labels.ne(outputs.logits.shape[-1] - 1) & (scores > threshold) cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) cur_scores = cur_scores[keep] cur_classes = cur_classes[keep] cur_masks = cur_masks[keep] cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) cur_boxes = center_to_corners_format(cur_boxes[keep]) h, w = cur_masks.shape[-2:] if len(cur_boxes) != len(cur_classes): raise ValueError("Not as many boxes as there are classes") # It may be that we have several predicted masks for the same stuff class. # In the following, we track the list of masks ids for each stuff class (they are merged later on) cur_masks = cur_masks.flatten(1) stuff_equiv_classes = defaultdict(lambda: []) for k, label in enumerate(cur_classes): if not is_thing_map[label.item()]: stuff_equiv_classes[label.item()].append(k) def get_ids_area(masks, scores, dedup=False): # This helper function creates the final panoptic segmentation image # It also returns the area of the masks that appears on the image m_id = masks.transpose(0, 1).softmax(-1) if m_id.shape[-1] == 0: # We didn't detect any mask :( m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) else: m_id = m_id.argmax(-1).view(h, w) if dedup: # Merge the masks corresponding to the same stuff class for equiv in stuff_equiv_classes.values(): if len(equiv) > 1: for eq_id in equiv: m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) final_h, final_w = to_tuple(target_size) seg_img = Image.fromarray(id_to_rgb(m_id.view(h, w).cpu().numpy())) seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST) np_seg_img = torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())) np_seg_img = np_seg_img.view(final_h, final_w, 3) np_seg_img = np_seg_img.numpy() m_id = torch.from_numpy(rgb_to_id(np_seg_img)) area = [] for i in range(len(scores)): area.append(m_id.eq(i).sum().item()) return area, seg_img area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True) if cur_classes.numel() > 0: # We know filter empty masks as long as we find some while True: filtered_small = torch.as_tensor( [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device ) if filtered_small.any().item(): cur_scores = cur_scores[~filtered_small] cur_classes = cur_classes[~filtered_small] cur_masks = cur_masks[~filtered_small] area, seg_img = get_ids_area(cur_masks, cur_scores) else: break else: cur_classes = torch.ones(1, 
dtype=torch.long, device=cur_classes.device) segments_info = [] for i, a in enumerate(area): cat = cur_classes[i].item() segments_info.append({"id": i, "isthing": is_thing_map[cat], "category_id": cat, "area": a}) del cur_classes with io.BytesIO() as out: seg_img.save(out, format="PNG") predictions = {"png_string": out.getvalue(), "segments_info": segments_info} preds.append(predictions) return preds def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the output of [`DetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*, defaults to `None`): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = nn.functional.softmax(out_logits, -1) scores, labels = prob[..., :-1].max(-1) # Convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(out_bbox) # Convert from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None): """ Converts the output of [`DetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`DetrForSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*, defaults to `None`): A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`DetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`DetrForSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional*): Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=[], target_size=target_size, ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({"segmentation": segmentation, "segments_info": segments}) return results def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`DetrForSegmentation`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`DetrForSegmentation`]): The outputs from [`DetrForSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. 
- **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: warnings.warn("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
1
huggingface/transformers
20,304
fix device issue
# What does this PR do?

When this block is run:

```
if isinstance(target_sizes, List):
    img_h = torch.Tensor([i[0] for i in target_sizes])
    img_w = torch.Tensor([i[1] for i in target_sizes])
```

`scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
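Below is a standalone sketch of the mismatch and of the fix (moving the scale factor onto the device of the predicted boxes). The tensor shapes and image sizes are made up for illustration; only the trailing `.to(boxes.device)` call mirrors the actual change in this PR.

```python
import torch

# Illustrative sketch, not the library code: tensors built from a plain Python
# list live on CPU, while the predicted boxes may live on GPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
boxes = torch.rand(2, 100, 4, device=device)        # pretend predicted boxes, corner format
target_sizes = [(480, 640), (480, 640)]             # (height, width) per image, plain list

img_h = torch.Tensor([i[0] for i in target_sizes])  # created on CPU
img_w = torch.Tensor([i[1] for i in target_sizes])  # created on CPU

# Without the fix, multiplying a CUDA tensor by a CPU tensor raises a device
# mismatch error. Moving the scale factor onto the boxes' device resolves it:
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
print(boxes.device)
```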
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do?

When this block is run:

```
if isinstance(target_sizes, List):
    img_h = torch.Tensor([i[0] for i in target_sizes])
    img_w = torch.Tensor([i[1] for i in target_sizes])
```

`scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/yolos/feature_extraction_yolos.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for YOLOS.""" import pathlib import warnings from typing import Dict, List, Optional, Tuple, Union import numpy as np from PIL import Image from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...image_transforms import center_to_corners_format, corners_to_center_format, rgb_to_id from ...image_utils import ImageFeatureExtractionMixin, is_torch_tensor from ...utils import TensorType, is_torch_available, logging if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) ImageInput = Union[Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]] # Copied from transformers.models.detr.feature_extraction_detr.masks_to_boxes def masks_to_boxes(masks): """ Compute the bounding boxes around the provided panoptic segmentation masks. The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) class YolosFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): r""" Constructs a YOLOS feature extractor. This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to `1333`): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. 
do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. """ model_input_names = ["pixel_values"] # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__init__ def __init__( self, format="coco_detection", do_resize=True, size=800, max_size=1333, do_normalize=True, image_mean=None, image_std=None, **kwargs ): super().__init__(**kwargs) self.format = self._is_valid_format(format) self.do_resize = do_resize self.size = size self.max_size = max_size self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [0.485, 0.456, 0.406] # ImageNet mean self.image_std = image_std if image_std is not None else [0.229, 0.224, 0.225] # ImageNet std # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._is_valid_format def _is_valid_format(self, format): if format not in ["coco_detection", "coco_panoptic"]: raise ValueError(f"Format {format} not supported") return format # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): if self.format == "coco_detection": image, target = self.prepare_coco_detection(image, target, return_segmentation_masks) return image, target elif self.format == "coco_panoptic": image, target = self.prepare_coco_panoptic(image, target, masks_path) return image, target else: raise ValueError(f"Format {self.format} not supported") # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, segmentations, height, width): try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_detection def prepare_coco_detection(self, image, target, return_segmentation_masks=False): """ Convert the target in COCO format into the format expected by DETR. 
""" w, h = image.size image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # get all COCO annotations for the given image anno = target["annotations"] anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0] boxes = [obj["bbox"] for obj in anno] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h) classes = [obj["category_id"] for obj in anno] classes = np.asarray(classes, dtype=np.int64) if return_segmentation_masks: segmentations = [obj["segmentation"] for obj in anno] masks = self.convert_coco_poly_to_mask(segmentations, h, w) keypoints = None if anno and "keypoints" in anno[0]: keypoints = [obj["keypoints"] for obj in anno] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] if num_keypoints: keypoints = keypoints.reshape((-1, 3)) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) boxes = boxes[keep] classes = classes[keep] if return_segmentation_masks: masks = masks[keep] if keypoints is not None: keypoints = keypoints[keep] target = {} target["boxes"] = boxes target["class_labels"] = classes if return_segmentation_masks: target["masks"] = masks target["image_id"] = image_id if keypoints is not None: target["keypoints"] = keypoints # for conversion to coco api area = np.asarray([obj["area"] for obj in anno], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64) target["area"] = area[keep] target["iscrowd"] = iscrowd[keep] target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_panoptic def prepare_coco_panoptic(self, image, target, masks_path, return_masks=True): w, h = image.size ann_info = target.copy() ann_path = pathlib.Path(masks_path) / ann_info["file_name"] if "segments_info" in ann_info: masks = np.asarray(Image.open(ann_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([ann["id"] for ann in ann_info["segments_info"]]) masks = masks == ids[:, None, None] masks = np.asarray(masks, dtype=np.uint8) labels = np.asarray([ann["category_id"] for ann in ann_info["segments_info"]], dtype=np.int64) target = {} target["image_id"] = np.asarray( [ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]], dtype=np.int64 ) if return_masks: target["masks"] = masks target["class_labels"] = labels target["boxes"] = masks_to_boxes(masks) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) if "segments_info" in ann_info: target["iscrowd"] = np.asarray([ann["iscrowd"] for ann in ann_info["segments_info"]], dtype=np.int64) target["area"] = np.asarray([ann["area"] for ann in ann_info["segments_info"]], dtype=np.float32) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._resize def _resize(self, image, size, target=None, max_size=None): """ Resize the image to the given size. Size can be min_size (scalar) or (w, h) tuple. If size is an int, smaller edge of the image will be matched to this number. If given, also resize the target accordingly. 
""" if not isinstance(image, Image.Image): image = self.to_pil_image(image) def get_size_with_aspect_ratio(image_size, size, max_size=None): w, h = image_size if max_size is not None: min_original_size = float(min((w, h))) max_original_size = float(max((w, h))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (w <= h and w == size) or (h <= w and h == size): return (h, w) if w < h: ow = size oh = int(size * h / w) else: oh = size ow = int(size * w / h) return (oh, ow) def get_size(image_size, size, max_size=None): if isinstance(size, (list, tuple)): return size else: # size returned must be (w, h) since we use PIL to resize images # so we revert the tuple return get_size_with_aspect_ratio(image_size, size, max_size)[::-1] size = get_size(image.size, size, max_size) rescaled_image = self.resize(image, size=size) if target is None: return rescaled_image, None ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) ratio_width, ratio_height = ratios target = target.copy() if "boxes" in target: boxes = target["boxes"] scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) target["boxes"] = scaled_boxes if "area" in target: area = target["area"] scaled_area = area * (ratio_width * ratio_height) target["area"] = scaled_area w, h = size target["size"] = np.asarray([h, w], dtype=np.int64) if "masks" in target: # use PyTorch as current workaround # TODO replace by self.resize masks = torch.from_numpy(target["masks"][:, None]).float() interpolated_masks = nn.functional.interpolate(masks, size=(h, w), mode="nearest")[:, 0] > 0.5 target["masks"] = interpolated_masks.numpy() return rescaled_image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._normalize def _normalize(self, image, mean, std, target=None): """ Normalize the image with a certain mean and std. If given, also normalize the target bounding boxes based on the size of the image. """ image = self.normalize(image, mean=mean, std=std) if target is None: return image, None target = target.copy() h, w = image.shape[-2:] if "boxes" in target: boxes = target["boxes"] boxes = corners_to_center_format(boxes) boxes = boxes / np.asarray([w, h, w, h], dtype=np.float32) target["boxes"] = boxes return image, target def __call__( self, images: ImageInput, annotations: Union[List[Dict], List[List[Dict]]] = None, return_segmentation_masks: Optional[bool] = False, masks_path: Optional[pathlib.Path] = None, padding: Optional[bool] = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to prepare for the model one or several image(s) and optional annotations. Images are by default padded up to the largest image in a batch. <Tip warning={true}> NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images. </Tip> Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. annotations (`Dict`, `List[Dict]`, *optional*): The corresponding annotations in COCO format. 
In case [`DetrFeatureExtractor`] was initialized with `format = "coco_detection"`, the annotations for each image should have the following format: {'image_id': int, 'annotations': [annotation]}, with the annotations being a list of COCO object annotations. In case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`, the annotations for each image should have the following format: {'image_id': int, 'file_name': str, 'segments_info': [segment_info]} with segments_info being a list of COCO panoptic annotations. return_segmentation_masks (`Dict`, `List[Dict]`, *optional*, defaults to `False`): Whether to also include instance segmentation masks as part of the labels in case `format = "coco_detection"`. masks_path (`pathlib.Path`, *optional*): Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only relevant in case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`. padding (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **labels** -- Optional labels to be fed to a model (when `annotations` are provided) """ # Input type checking for clearer error valid_images = False valid_annotations = False valid_masks_path = False # Check that images has a valid type if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): valid_images = True elif isinstance(images, (list, tuple)): if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): valid_images = True if not valid_images: raise ValueError( "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), " "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." ) is_batched = bool( isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) ) # Check that annotations has a valid type if annotations is not None: if not is_batched: if self.format == "coco_detection": if isinstance(annotations, dict) and "image_id" in annotations and "annotations" in annotations: if isinstance(annotations["annotations"], (list, tuple)): # an image can have no annotations if len(annotations["annotations"]) == 0 or isinstance(annotations["annotations"][0], dict): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations, dict) and "image_id" in annotations and "segments_info" in annotations: if isinstance(annotations["segments_info"], (list, tuple)): # an image can have no segments (?) 
if len(annotations["segments_info"]) == 0 or isinstance( annotations["segments_info"][0], dict ): valid_annotations = True else: if isinstance(annotations, (list, tuple)): if len(images) != len(annotations): raise ValueError("There must be as many annotations as there are images") if isinstance(annotations[0], Dict): if self.format == "coco_detection": if isinstance(annotations[0]["annotations"], (list, tuple)): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations[0]["segments_info"], (list, tuple)): valid_annotations = True if not valid_annotations: raise ValueError( """ Annotations must of type `Dict` (single image) or `List[Dict]` (batch of images). In case of object detection, each dictionary should contain the keys 'image_id' and 'annotations', with the latter being a list of annotations in COCO format. In case of panoptic segmentation, each dictionary should contain the keys 'file_name', 'image_id' and 'segments_info', with the latter being a list of annotations in COCO format. """ ) # Check that masks_path has a valid type if masks_path is not None: if self.format == "coco_panoptic": if isinstance(masks_path, pathlib.Path): valid_masks_path = True if not valid_masks_path: raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" " `pathlib.Path` object." ) if not is_batched: images = [images] if annotations is not None: annotations = [annotations] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): if not isinstance(image, Image.Image): image = self.to_pil_image(image) image, target = self.prepare(image, target, return_segmentation_masks, masks_path) images[idx] = image annotations[idx] = target # transformations (resizing + normalization) if self.do_resize and self.size is not None: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._resize(image=image, target=target, size=self.size, max_size=self.max_size) images[idx] = image annotations[idx] = target else: for idx, image in enumerate(images): images[idx] = self._resize(image=image, target=None, size=self.size, max_size=self.max_size)[0] if self.do_normalize: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._normalize( image=image, mean=self.image_mean, std=self.image_std, target=target ) images[idx] = image annotations[idx] = target else: images = [ self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images ] if padding: # pad images up to largest image in batch max_size = self._max_by_axis([list(image.shape) for image in images]) c, h, w = max_size padded_images = [] for image in images: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) images = padded_images # return as BatchFeature data = {} data["pixel_values"] = images encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: # Convert to TensorType tensor_type = return_tensors if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) if not tensor_type == TensorType.PYTORCH: raise ValueError("Only PyTorch is supported for the moment.") else: if not is_torch_available(): raise ImportError("Unable to convert 
output to PyTorch tensors format, PyTorch is not installed.") encoded_inputs["labels"] = [ {k: torch.from_numpy(v) for k, v in target.items()} for target in annotations ] return encoded_inputs # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._max_by_axis def _max_by_axis(self, the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes def pad(self, pixel_values_list: List["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]] = None): """ Pad images up to the largest image in a batch. Args: pixel_values_list (`List[torch.Tensor]`): List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following field: - **pixel_values** -- Pixel values to be fed to a model. """ max_size = self._max_by_axis([list(image.shape) for image in pixel_values_list]) c, h, w = max_size padded_images = [] for image in pixel_values_list: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # return as BatchFeature data = {"pixel_values": padded_images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process def post_process(self, outputs, target_sizes): """ Converts the output of [`DetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = nn.functional.softmax(out_logits, -1) scores, labels = prob[..., :-1].max(-1) # convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(out_bbox) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_object_detection with Detr->Yolos def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the output of [`YolosForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`YolosObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*, defaults to `None`): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = nn.functional.softmax(out_logits, -1) scores, labels = prob[..., :-1].max(-1) # Convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(out_bbox) # Convert from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for YOLOS.""" import pathlib import warnings from typing import Dict, List, Optional, Tuple, Union import numpy as np from PIL import Image from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...image_transforms import center_to_corners_format, corners_to_center_format, rgb_to_id from ...image_utils import ImageFeatureExtractionMixin, is_torch_tensor from ...utils import TensorType, is_torch_available, logging if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) ImageInput = Union[Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]] # Copied from transformers.models.detr.feature_extraction_detr.masks_to_boxes def masks_to_boxes(masks): """ Compute the bounding boxes around the provided panoptic segmentation masks. The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) class YolosFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): r""" Constructs a YOLOS feature extractor. This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to `1333`): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. 
do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. """ model_input_names = ["pixel_values"] # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__init__ def __init__( self, format="coco_detection", do_resize=True, size=800, max_size=1333, do_normalize=True, image_mean=None, image_std=None, **kwargs ): super().__init__(**kwargs) self.format = self._is_valid_format(format) self.do_resize = do_resize self.size = size self.max_size = max_size self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [0.485, 0.456, 0.406] # ImageNet mean self.image_std = image_std if image_std is not None else [0.229, 0.224, 0.225] # ImageNet std # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._is_valid_format def _is_valid_format(self, format): if format not in ["coco_detection", "coco_panoptic"]: raise ValueError(f"Format {format} not supported") return format # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): if self.format == "coco_detection": image, target = self.prepare_coco_detection(image, target, return_segmentation_masks) return image, target elif self.format == "coco_panoptic": image, target = self.prepare_coco_panoptic(image, target, masks_path) return image, target else: raise ValueError(f"Format {self.format} not supported") # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, segmentations, height, width): try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_detection def prepare_coco_detection(self, image, target, return_segmentation_masks=False): """ Convert the target in COCO format into the format expected by DETR. 
""" w, h = image.size image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # get all COCO annotations for the given image anno = target["annotations"] anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0] boxes = [obj["bbox"] for obj in anno] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h) classes = [obj["category_id"] for obj in anno] classes = np.asarray(classes, dtype=np.int64) if return_segmentation_masks: segmentations = [obj["segmentation"] for obj in anno] masks = self.convert_coco_poly_to_mask(segmentations, h, w) keypoints = None if anno and "keypoints" in anno[0]: keypoints = [obj["keypoints"] for obj in anno] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] if num_keypoints: keypoints = keypoints.reshape((-1, 3)) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) boxes = boxes[keep] classes = classes[keep] if return_segmentation_masks: masks = masks[keep] if keypoints is not None: keypoints = keypoints[keep] target = {} target["boxes"] = boxes target["class_labels"] = classes if return_segmentation_masks: target["masks"] = masks target["image_id"] = image_id if keypoints is not None: target["keypoints"] = keypoints # for conversion to coco api area = np.asarray([obj["area"] for obj in anno], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64) target["area"] = area[keep] target["iscrowd"] = iscrowd[keep] target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_panoptic def prepare_coco_panoptic(self, image, target, masks_path, return_masks=True): w, h = image.size ann_info = target.copy() ann_path = pathlib.Path(masks_path) / ann_info["file_name"] if "segments_info" in ann_info: masks = np.asarray(Image.open(ann_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([ann["id"] for ann in ann_info["segments_info"]]) masks = masks == ids[:, None, None] masks = np.asarray(masks, dtype=np.uint8) labels = np.asarray([ann["category_id"] for ann in ann_info["segments_info"]], dtype=np.int64) target = {} target["image_id"] = np.asarray( [ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]], dtype=np.int64 ) if return_masks: target["masks"] = masks target["class_labels"] = labels target["boxes"] = masks_to_boxes(masks) target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) if "segments_info" in ann_info: target["iscrowd"] = np.asarray([ann["iscrowd"] for ann in ann_info["segments_info"]], dtype=np.int64) target["area"] = np.asarray([ann["area"] for ann in ann_info["segments_info"]], dtype=np.float32) return image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._resize def _resize(self, image, size, target=None, max_size=None): """ Resize the image to the given size. Size can be min_size (scalar) or (w, h) tuple. If size is an int, smaller edge of the image will be matched to this number. If given, also resize the target accordingly. 
""" if not isinstance(image, Image.Image): image = self.to_pil_image(image) def get_size_with_aspect_ratio(image_size, size, max_size=None): w, h = image_size if max_size is not None: min_original_size = float(min((w, h))) max_original_size = float(max((w, h))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (w <= h and w == size) or (h <= w and h == size): return (h, w) if w < h: ow = size oh = int(size * h / w) else: oh = size ow = int(size * w / h) return (oh, ow) def get_size(image_size, size, max_size=None): if isinstance(size, (list, tuple)): return size else: # size returned must be (w, h) since we use PIL to resize images # so we revert the tuple return get_size_with_aspect_ratio(image_size, size, max_size)[::-1] size = get_size(image.size, size, max_size) rescaled_image = self.resize(image, size=size) if target is None: return rescaled_image, None ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) ratio_width, ratio_height = ratios target = target.copy() if "boxes" in target: boxes = target["boxes"] scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) target["boxes"] = scaled_boxes if "area" in target: area = target["area"] scaled_area = area * (ratio_width * ratio_height) target["area"] = scaled_area w, h = size target["size"] = np.asarray([h, w], dtype=np.int64) if "masks" in target: # use PyTorch as current workaround # TODO replace by self.resize masks = torch.from_numpy(target["masks"][:, None]).float() interpolated_masks = nn.functional.interpolate(masks, size=(h, w), mode="nearest")[:, 0] > 0.5 target["masks"] = interpolated_masks.numpy() return rescaled_image, target # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._normalize def _normalize(self, image, mean, std, target=None): """ Normalize the image with a certain mean and std. If given, also normalize the target bounding boxes based on the size of the image. """ image = self.normalize(image, mean=mean, std=std) if target is None: return image, None target = target.copy() h, w = image.shape[-2:] if "boxes" in target: boxes = target["boxes"] boxes = corners_to_center_format(boxes) boxes = boxes / np.asarray([w, h, w, h], dtype=np.float32) target["boxes"] = boxes return image, target def __call__( self, images: ImageInput, annotations: Union[List[Dict], List[List[Dict]]] = None, return_segmentation_masks: Optional[bool] = False, masks_path: Optional[pathlib.Path] = None, padding: Optional[bool] = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to prepare for the model one or several image(s) and optional annotations. Images are by default padded up to the largest image in a batch. <Tip warning={true}> NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images. </Tip> Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. annotations (`Dict`, `List[Dict]`, *optional*): The corresponding annotations in COCO format. 
In case [`DetrFeatureExtractor`] was initialized with `format = "coco_detection"`, the annotations for each image should have the following format: {'image_id': int, 'annotations': [annotation]}, with the annotations being a list of COCO object annotations. In case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`, the annotations for each image should have the following format: {'image_id': int, 'file_name': str, 'segments_info': [segment_info]} with segments_info being a list of COCO panoptic annotations. return_segmentation_masks (`Dict`, `List[Dict]`, *optional*, defaults to `False`): Whether to also include instance segmentation masks as part of the labels in case `format = "coco_detection"`. masks_path (`pathlib.Path`, *optional*): Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only relevant in case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`. padding (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **labels** -- Optional labels to be fed to a model (when `annotations` are provided) """ # Input type checking for clearer error valid_images = False valid_annotations = False valid_masks_path = False # Check that images has a valid type if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): valid_images = True elif isinstance(images, (list, tuple)): if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): valid_images = True if not valid_images: raise ValueError( "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), " "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." ) is_batched = bool( isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) ) # Check that annotations has a valid type if annotations is not None: if not is_batched: if self.format == "coco_detection": if isinstance(annotations, dict) and "image_id" in annotations and "annotations" in annotations: if isinstance(annotations["annotations"], (list, tuple)): # an image can have no annotations if len(annotations["annotations"]) == 0 or isinstance(annotations["annotations"][0], dict): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations, dict) and "image_id" in annotations and "segments_info" in annotations: if isinstance(annotations["segments_info"], (list, tuple)): # an image can have no segments (?) 
if len(annotations["segments_info"]) == 0 or isinstance( annotations["segments_info"][0], dict ): valid_annotations = True else: if isinstance(annotations, (list, tuple)): if len(images) != len(annotations): raise ValueError("There must be as many annotations as there are images") if isinstance(annotations[0], Dict): if self.format == "coco_detection": if isinstance(annotations[0]["annotations"], (list, tuple)): valid_annotations = True elif self.format == "coco_panoptic": if isinstance(annotations[0]["segments_info"], (list, tuple)): valid_annotations = True if not valid_annotations: raise ValueError( """ Annotations must of type `Dict` (single image) or `List[Dict]` (batch of images). In case of object detection, each dictionary should contain the keys 'image_id' and 'annotations', with the latter being a list of annotations in COCO format. In case of panoptic segmentation, each dictionary should contain the keys 'file_name', 'image_id' and 'segments_info', with the latter being a list of annotations in COCO format. """ ) # Check that masks_path has a valid type if masks_path is not None: if self.format == "coco_panoptic": if isinstance(masks_path, pathlib.Path): valid_masks_path = True if not valid_masks_path: raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" " `pathlib.Path` object." ) if not is_batched: images = [images] if annotations is not None: annotations = [annotations] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): if not isinstance(image, Image.Image): image = self.to_pil_image(image) image, target = self.prepare(image, target, return_segmentation_masks, masks_path) images[idx] = image annotations[idx] = target # transformations (resizing + normalization) if self.do_resize and self.size is not None: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._resize(image=image, target=target, size=self.size, max_size=self.max_size) images[idx] = image annotations[idx] = target else: for idx, image in enumerate(images): images[idx] = self._resize(image=image, target=None, size=self.size, max_size=self.max_size)[0] if self.do_normalize: if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): image, target = self._normalize( image=image, mean=self.image_mean, std=self.image_std, target=target ) images[idx] = image annotations[idx] = target else: images = [ self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images ] if padding: # pad images up to largest image in batch max_size = self._max_by_axis([list(image.shape) for image in images]) c, h, w = max_size padded_images = [] for image in images: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) images = padded_images # return as BatchFeature data = {} data["pixel_values"] = images encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: # Convert to TensorType tensor_type = return_tensors if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) if not tensor_type == TensorType.PYTORCH: raise ValueError("Only PyTorch is supported for the moment.") else: if not is_torch_available(): raise ImportError("Unable to convert 
output to PyTorch tensors format, PyTorch is not installed.") encoded_inputs["labels"] = [ {k: torch.from_numpy(v) for k, v in target.items()} for target in annotations ] return encoded_inputs # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._max_by_axis def _max_by_axis(self, the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes def pad(self, pixel_values_list: List["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]] = None): """ Pad images up to the largest image in a batch. Args: pixel_values_list (`List[torch.Tensor]`): List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following field: - **pixel_values** -- Pixel values to be fed to a model. """ max_size = self._max_by_axis([list(image.shape) for image in pixel_values_list]) c, h, w = max_size padded_images = [] for image in pixel_values_list: # create padded image padded_image = np.zeros((c, h, w), dtype=np.float32) padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) padded_images.append(padded_image) # return as BatchFeature data = {"pixel_values": padded_images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process def post_process(self, outputs, target_sizes): """ Converts the output of [`DetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = nn.functional.softmax(out_logits, -1) scores, labels = prob[..., :-1].max(-1) # convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(out_bbox) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_object_detection with Detr->Yolos def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the output of [`YolosForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`YolosObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*, defaults to `None`): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = nn.functional.softmax(out_logits, -1) scores, labels = prob[..., :-1].max(-1) # Convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(out_bbox) # Convert from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results
1
huggingface/transformers
20,304
fix device issue
# What does this PR do?

When this block is run:

```
if isinstance(target_sizes, List):
    img_h = torch.Tensor([i[0] for i in target_sizes])
    img_w = torch.Tensor([i[1] for i in target_sizes])
```

`scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
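This row pairs the same device fix with a test file. Purely as a hedged illustration (the helper name and structure are hypothetical, not taken from the test below), this is the kind of device check the change makes possible:

```python
import torch

# Hypothetical helper, not part of the test file below: after post-processing,
# scores, labels and boxes should share a device with the model outputs even
# when `target_sizes` was passed as a plain Python list.
def assert_results_on_device(results, expected_device):
    expected = torch.device(expected_device)
    for per_image in results:
        for key in ("scores", "labels", "boxes"):
            assert per_image[key].device.type == expected.type
```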
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do?

When this block is run:

```
if isinstance(target_sizes, List):
    img_h = torch.Tensor([i[0] for i in target_sizes])
    img_w = torch.Tensor([i[1] for i in target_sizes])
```

`scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/models/conditional_detr/test_modeling_conditional_detr.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Conditional DETR model. """ import inspect import math import unittest from transformers import ConditionalDetrConfig, is_timm_available, is_vision_available from transformers.testing_utils import require_timm, require_vision, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor if is_timm_available(): import torch from transformers import ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel if is_vision_available(): from PIL import Image from transformers import ConditionalDetrFeatureExtractor class ConditionalDetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=256, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, min_size=200, max_size=200, n_targets=8, num_labels=91, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.n_targets = n_targets self.num_labels = num_labels # we also set the expected seq length for both encoder and decoder self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): return ConditionalDetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, 
decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_conditional_detr_model(self, config, pixel_values, pixel_mask, labels): model = ConditionalDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size) ) def create_and_check_conditional_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = ConditionalDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_timm class ConditionalDetrModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ( ( ConditionalDetrModel, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ) if is_timm_available() else () ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ in ["ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation"]: labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.min_size, self.model_tester.max_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = ConditionalDetrModelTester(self) self.config_tester = ConfigTester(self, config_class=ConditionalDetrConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_conditional_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_conditional_detr_model(*config_and_inputs) def test_conditional_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_conditional_detr_object_detection_head_model(*config_and_inputs) 
@unittest.skip(reason="Conditional DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Conditional DETR does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="Conditional DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Conditional DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @slow def test_model_outputs_equivalence(self): # TODO Niels: fix me! pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = self.model_tester.decoder_seq_length encoder_seq_length = self.model_tester.encoder_seq_length decoder_key_length = self.model_tester.decoder_seq_length encoder_key_length = self.model_tester.encoder_seq_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 6 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "ConditionalDetrForObjectDetection": correct_outlen += 1 # Panoptic Segmentation model returns pred_logits, pred_boxes, pred_masks if model_class.__name__ == "ConditionalDetrForSegmentation": correct_outlen += 2 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if 
hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" in arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "ConditionalDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.init_xavier_std = 1e9 for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "bbox_attention" in name and "bias" not in name: self.assertLess( 
100000, abs(param.data.max().item()), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class ConditionalDetrModelIntegrationTests(unittest.TestCase): @cached_property def default_feature_extractor(self): return ( ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") if is_vision_available() else None ) def test_inference_no_head(self): model = ConditionalDetrModel.from_pretrained("microsoft/conditional-detr-resnet-50").to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 300, 256)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[0.4222, 0.7471, 0.8760], [0.6395, -0.2729, 0.7127], [-0.3090, 0.7642, 0.9529]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_object_detection_head(self): model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50").to( torch_device ) feature_extractor = self.default_feature_extractor image = prepare_img() encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) # verify logits + box predictions expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slice_logits = torch.tensor( [[-10.4372, -5.7558, -8.6764], [-10.5410, -5.8704, -8.0590], [-10.6827, -6.3469, -8.3923]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = torch.tensor( [[0.7733, 0.6576, 0.4496], [0.5171, 0.1184, 0.9094], [0.8846, 0.5647, 0.2486]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) # verify postprocessing results = feature_extractor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.8330, 0.8313, 0.8039, 0.6829, 0.5355]) expected_labels = [75, 17, 17, 75, 63] expected_slice_boxes = torch.tensor([38.3089, 72.1022, 177.6293, 118.4512]) self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Conditional DETR model. """ import inspect import math import unittest from transformers import ConditionalDetrConfig, is_timm_available, is_vision_available from transformers.testing_utils import require_timm, require_vision, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor if is_timm_available(): import torch from transformers import ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel if is_vision_available(): from PIL import Image from transformers import ConditionalDetrFeatureExtractor class ConditionalDetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=256, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, min_size=200, max_size=200, n_targets=8, num_labels=91, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.n_targets = n_targets self.num_labels = num_labels # we also set the expected seq length for both encoder and decoder self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): return ConditionalDetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, 
decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_conditional_detr_model(self, config, pixel_values, pixel_mask, labels): model = ConditionalDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size) ) def create_and_check_conditional_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = ConditionalDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_timm class ConditionalDetrModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ( ( ConditionalDetrModel, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ) if is_timm_available() else () ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ in ["ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation"]: labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.min_size, self.model_tester.max_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = ConditionalDetrModelTester(self) self.config_tester = ConfigTester(self, config_class=ConditionalDetrConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_conditional_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_conditional_detr_model(*config_and_inputs) def test_conditional_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_conditional_detr_object_detection_head_model(*config_and_inputs) 
@unittest.skip(reason="Conditional DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Conditional DETR does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="Conditional DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Conditional DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @slow def test_model_outputs_equivalence(self): # TODO Niels: fix me! pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = self.model_tester.decoder_seq_length encoder_seq_length = self.model_tester.encoder_seq_length decoder_key_length = self.model_tester.decoder_seq_length encoder_key_length = self.model_tester.encoder_seq_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 6 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "ConditionalDetrForObjectDetection": correct_outlen += 1 # Panoptic Segmentation model returns pred_logits, pred_boxes, pred_masks if model_class.__name__ == "ConditionalDetrForSegmentation": correct_outlen += 2 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if 
hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" in arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "ConditionalDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.init_xavier_std = 1e9 for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "bbox_attention" in name and "bias" not in name: self.assertLess( 
100000, abs(param.data.max().item()), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class ConditionalDetrModelIntegrationTests(unittest.TestCase): @cached_property def default_feature_extractor(self): return ( ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") if is_vision_available() else None ) def test_inference_no_head(self): model = ConditionalDetrModel.from_pretrained("microsoft/conditional-detr-resnet-50").to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 300, 256)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[0.4222, 0.7471, 0.8760], [0.6395, -0.2729, 0.7127], [-0.3090, 0.7642, 0.9529]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_object_detection_head(self): model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50").to( torch_device ) feature_extractor = self.default_feature_extractor image = prepare_img() encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) # verify logits + box predictions expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slice_logits = torch.tensor( [[-10.4372, -5.7558, -8.6764], [-10.5410, -5.8704, -8.0590], [-10.6827, -6.3469, -8.3923]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = torch.tensor( [[0.7733, 0.6576, 0.4496], [0.5171, 0.1184, 0.9094], [0.8846, 0.5647, 0.2486]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) # verify postprocessing results = feature_extractor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.8330, 0.8313, 0.8039, 0.6829, 0.5355]).to(torch_device) expected_labels = [75, 17, 17, 75, 63] expected_slice_boxes = torch.tensor([38.3089, 72.1022, 177.6293, 118.4512]).to(torch_device) self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
1
huggingface/transformers
20,304
fix device issue
# What does this PR do?

When this block is run

```
if isinstance(target_sizes, List):
    img_h = torch.Tensor([i[0] for i in target_sizes])
    img_w = torch.Tensor([i[1] for i in target_sizes])
```

`scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
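For illustration, a minimal self-contained sketch of the device handling described above (the helper name `rescale_boxes` and the exact shapes are assumptions for this sketch, not the library's code; the real logic sits in the feature extractor's post-processing):

```python
import torch
from typing import List


def rescale_boxes(boxes: torch.Tensor, target_sizes) -> torch.Tensor:
    """Scale normalized boxes of shape (batch, num_queries, 4) to absolute image sizes."""
    if isinstance(target_sizes, List):
        # torch.Tensor(...) always allocates on CPU, even when `boxes` lives on GPU.
        img_h = torch.Tensor([i[0] for i in target_sizes])
        img_w = torch.Tensor([i[1] for i in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)
    # Move the scale factor onto the device of the predictions before multiplying,
    # otherwise the element-wise product fails with a CPU/GPU device mismatch.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
    return boxes * scale_fct[:, None, :]


# Usage: works whether the predictions live on CPU or GPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
boxes = torch.rand(1, 300, 4, device=device)
print(rescale_boxes(boxes, [(480, 640)]).device)  # same device as `boxes`
```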
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
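On the test side, the matching adjustment (a hedged sketch of the pattern, not the exact test code; it assumes `torch_device` resolves to the active accelerator, as the suite's helper does) is to move the hard-coded expected tensors onto the same device as the post-processed outputs before comparing them:

```python
import torch

# Stand-in for the test suite's torch_device helper (assumption for this sketch).
torch_device = "cuda" if torch.cuda.is_available() else "cpu"

# `scores` stands in for results["scores"] returned by post-processing, which now
# comes back on torch_device. The expected reference values must be moved there
# too: torch.allclose raises if its arguments live on different devices.
scores = torch.tensor([0.8330, 0.8313, 0.8039, 0.6829, 0.5355], device=torch_device)
expected_scores = torch.tensor([0.8330, 0.8313, 0.8039, 0.6829, 0.5355]).to(torch_device)
assert torch.allclose(scores, expected_scores, atol=1e-4)
```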
./tests/models/deformable_detr/test_modeling_deformable_detr.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Deformable DETR model. """ import inspect import math import unittest from typing import Dict, List, Tuple from transformers import DeformableDetrConfig, is_timm_available, is_vision_available from transformers.file_utils import cached_property from transformers.testing_utils import require_timm, require_torch_gpu, require_vision, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor if is_timm_available(): import torch from transformers import DeformableDetrForObjectDetection, DeformableDetrModel if is_vision_available(): from PIL import Image from transformers import AutoFeatureExtractor class DeformableDetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=256, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, image_size=196, n_targets=8, num_labels=91, num_feature_levels=4, encoder_n_points=2, decoder_n_points=6, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.image_size = image_size self.n_targets = n_targets self.num_labels = num_labels self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points # we also set the expected seq length for both encoder and decoder self.encoder_seq_length = ( math.ceil(self.image_size / 8) ** 2 + math.ceil(self.image_size / 16) ** 2 + math.ceil(self.image_size / 32) ** 2 + math.ceil(self.image_size / 64) ** 2 ) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device) labels.append(target) config = self.get_config() 
return config, pixel_values, pixel_mask, labels def get_config(self): return DeformableDetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, num_feature_levels=self.num_feature_levels, encoder_n_points=self.encoder_n_points, decoder_n_points=self.decoder_n_points, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_deformable_detr_model(self, config, pixel_values, pixel_mask, labels): model = DeformableDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_size)) def create_and_check_deformable_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = DeformableDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_timm class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (DeformableDetrModel, DeformableDetrForObjectDetection) if is_timm_available() else () is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "DeformableDetrForObjectDetection": labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.image_size, self.model_tester.image_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = DeformableDetrModelTester(self) self.config_tester = ConfigTester(self, config_class=DeformableDetrConfig, has_text_modality=False) def test_config(self): # we don't test common_properties and arguments_init as these don't apply for Deformable DETR self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() def test_deformable_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deformable_detr_model(*config_and_inputs) def test_deformable_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deformable_detr_object_detection_head_model(*config_and_inputs) @unittest.skip(reason="Deformable DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Deformable DETR does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="Deformable DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Deformable DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) out_len = len(outputs) correct_outlen = 8 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "DeformableDetrForObjectDetection": correct_outlen += 2 self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, self.model_tester.num_queries, self.model_tester.num_queries], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.decoder_n_points, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] 
= True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: print("Model class:", model_class) model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def test_retain_grad_hidden_states_attentions(self): # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) # we take the second output since last_hidden_state is the second item output = outputs[1] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" in 
arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "DeformableDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: print("Model class:", model_class) model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if param.requires_grad: if ( "level_embed" in name or "sampling_offsets.bias" in name or "value_proj" in name or "output_proj" in name or "reference_points" in name ): continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class DeformableDetrModelIntegrationTests(unittest.TestCase): @cached_property def default_feature_extractor(self): return AutoFeatureExtractor.from_pretrained("SenseTime/deformable-detr") if is_vision_available() else None def test_inference_object_detection_head(self): model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr").to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_logits = torch.tensor( [[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]] ).to(torch_device) expected_boxes = torch.tensor( [[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) # verify postprocessing results = feature_extractor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.7999, 0.7894, 0.6331, 0.4720, 0.4382]) expected_labels = [17, 17, 75, 75, 63] expected_slice_boxes = torch.tensor([16.5028, 52.8390, 318.2544, 470.7841]) 
self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes)) def test_inference_object_detection_head_with_box_refine_two_stage(self): model = DeformableDetrForObjectDetection.from_pretrained( "SenseTime/deformable-detr-with-box-refine-two-stage" ).to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_logits = torch.tensor( [[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298]] ).to(torch_device) expected_boxes = torch.tensor( [[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) @require_torch_gpu def test_inference_object_detection_head_equivalence_cpu_gpu(self): feature_extractor = self.default_feature_extractor image = prepare_img() encoding = feature_extractor(images=image, return_tensors="pt") pixel_values = encoding["pixel_values"] pixel_mask = encoding["pixel_mask"] # 1. run model on CPU model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr-single-scale") with torch.no_grad(): cpu_outputs = model(pixel_values, pixel_mask) # 2. run model on GPU model.to("cuda") with torch.no_grad(): gpu_outputs = model(pixel_values.to("cuda"), pixel_mask.to("cuda")) # 3. assert equivalence for key in cpu_outputs.keys(): assert torch.allclose(cpu_outputs[key], gpu_outputs[key].cpu(), atol=1e-4) expected_logits = torch.tensor( [[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003]] ) assert torch.allclose(cpu_outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Deformable DETR model. """ import inspect import math import unittest from typing import Dict, List, Tuple from transformers import DeformableDetrConfig, is_timm_available, is_vision_available from transformers.file_utils import cached_property from transformers.testing_utils import require_timm, require_torch_gpu, require_vision, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor if is_timm_available(): import torch from transformers import DeformableDetrForObjectDetection, DeformableDetrModel if is_vision_available(): from PIL import Image from transformers import AutoFeatureExtractor class DeformableDetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=256, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, image_size=196, n_targets=8, num_labels=91, num_feature_levels=4, encoder_n_points=2, decoder_n_points=6, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.image_size = image_size self.n_targets = n_targets self.num_labels = num_labels self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points # we also set the expected seq length for both encoder and decoder self.encoder_seq_length = ( math.ceil(self.image_size / 8) ** 2 + math.ceil(self.image_size / 16) ** 2 + math.ceil(self.image_size / 32) ** 2 + math.ceil(self.image_size / 64) ** 2 ) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device) labels.append(target) config = self.get_config() 
return config, pixel_values, pixel_mask, labels def get_config(self): return DeformableDetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, num_feature_levels=self.num_feature_levels, encoder_n_points=self.encoder_n_points, decoder_n_points=self.decoder_n_points, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_deformable_detr_model(self, config, pixel_values, pixel_mask, labels): model = DeformableDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_size)) def create_and_check_deformable_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = DeformableDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_timm class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (DeformableDetrModel, DeformableDetrForObjectDetection) if is_timm_available() else () is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "DeformableDetrForObjectDetection": labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.image_size, self.model_tester.image_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = DeformableDetrModelTester(self) self.config_tester = ConfigTester(self, config_class=DeformableDetrConfig, has_text_modality=False) def test_config(self): # we don't test common_properties and arguments_init as these don't apply for Deformable DETR self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() def test_deformable_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deformable_detr_model(*config_and_inputs) def test_deformable_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deformable_detr_object_detection_head_model(*config_and_inputs) @unittest.skip(reason="Deformable DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Deformable DETR does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="Deformable DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Deformable DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) out_len = len(outputs) correct_outlen = 8 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "DeformableDetrForObjectDetection": correct_outlen += 2 self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, self.model_tester.num_queries, self.model_tester.num_queries], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.decoder_n_points, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] 
= True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: print("Model class:", model_class) model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def test_retain_grad_hidden_states_attentions(self): # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) # we take the second output since last_hidden_state is the second item output = outputs[1] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" in 
arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "DeformableDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: print("Model class:", model_class) model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if param.requires_grad: if ( "level_embed" in name or "sampling_offsets.bias" in name or "value_proj" in name or "output_proj" in name or "reference_points" in name ): continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class DeformableDetrModelIntegrationTests(unittest.TestCase): @cached_property def default_feature_extractor(self): return AutoFeatureExtractor.from_pretrained("SenseTime/deformable-detr") if is_vision_available() else None def test_inference_object_detection_head(self): model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr").to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_logits = torch.tensor( [[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]] ).to(torch_device) expected_boxes = torch.tensor( [[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) # verify postprocessing results = feature_extractor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.7999, 0.7894, 0.6331, 0.4720, 0.4382]).to(torch_device) expected_labels = [17, 17, 75, 75, 63] expected_slice_boxes = torch.tensor([16.5028, 52.8390, 318.2544, 
470.7841]).to(torch_device) self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes)) def test_inference_object_detection_head_with_box_refine_two_stage(self): model = DeformableDetrForObjectDetection.from_pretrained( "SenseTime/deformable-detr-with-box-refine-two-stage" ).to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_logits = torch.tensor( [[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298]] ).to(torch_device) expected_boxes = torch.tensor( [[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) @require_torch_gpu def test_inference_object_detection_head_equivalence_cpu_gpu(self): feature_extractor = self.default_feature_extractor image = prepare_img() encoding = feature_extractor(images=image, return_tensors="pt") pixel_values = encoding["pixel_values"] pixel_mask = encoding["pixel_mask"] # 1. run model on CPU model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr-single-scale") with torch.no_grad(): cpu_outputs = model(pixel_values, pixel_mask) # 2. run model on GPU model.to("cuda") with torch.no_grad(): gpu_outputs = model(pixel_values.to("cuda"), pixel_mask.to("cuda")) # 3. assert equivalence for key in cpu_outputs.keys(): assert torch.allclose(cpu_outputs[key], gpu_outputs[key].cpu(), atol=1e-4) expected_logits = torch.tensor( [[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003]] ) assert torch.allclose(cpu_outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
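For readers unfamiliar with the issue, here is a minimal sketch of the device mismatch described above. The helper name `rescale_boxes` and the exact placement of the fix are illustrative assumptions; only the "tensors built from Python lists live on CPU" behaviour and the `.to(boxes.device)` remedy come from the description.

```
import torch
from typing import List


def rescale_boxes(boxes: torch.Tensor, target_sizes) -> torch.Tensor:
    # Hypothetical post-processing helper mirroring the logic quoted above.
    if isinstance(target_sizes, List):
        # torch.Tensor(...) built from a Python list is always allocated on CPU,
        # even when `boxes` sits on a GPU.
        img_h = torch.Tensor([size[0] for size in target_sizes])
        img_w = torch.Tensor([size[1] for size in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)
    # Assumed fix: move the scale factor onto the same device as the boxes
    # before broadcasting, so the multiplication succeeds for CPU and GPU runs alike.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
    return boxes * scale_fct[:, None, :]
```

With a change of this shape, the post-processing behaves the same whether the predicted boxes come from a CPU or a CUDA forward pass.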
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./examples/tensorflow/translation/run_translation.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for translation. """ # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments. import json import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import tensorflow as tf from datasets import load_dataset import evaluate import transformers from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForSeq2Seq, HfArgumentParser, KerasMetricCallback, M2M100Tokenizer, MBart50Tokenizer, MBart50TokenizerFast, MBartTokenizer, MBartTokenizerFast, PushToHubCallback, TFAutoModelForSeq2SeqLM, TFTrainingArguments, create_optimizer, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # region Dependencies and constants # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.25.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") logger = logging.getLogger(__name__) MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast, M2M100Tokenizer] # endregion # region Arguments @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" source_lang: str = field(default=None, metadata={"help": "Source language id for translation."}) target_lang: str = field(default=None, metadata={"help": "Target language id for translation."}) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} ) validation_file: Optional[str] = field( default=None, metadata={ "help": ( "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." ) }, ) test_file: Optional[str] = field( default=None, metadata={ "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_source_length: Optional[int] = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_target_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) val_max_target_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`." "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) num_beams: Optional[int] = field( default=None, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " "which is used during ``evaluate`` and ``predict``." ) }, ) ignore_pad_token_for_loss: bool = field( default=True, metadata={ "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." 
}, ) source_prefix: Optional[str] = field( default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."} ) forced_bos_token: Optional[str] = field( default=None, metadata={ "help": ( "The token to force as the first generated token after the :obj:`decoder_start_token_id`.Useful for" " multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token needs to" " be the target language token.(Usually it is the target language token)" ) }, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if self.val_max_target_length is None: self.val_max_target_length = self.max_target_length # endregion def main(): # region Argument parsing # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_translation", model_args, data_args, framework="tensorflow") # endregion # region Logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO) datasets.utils.logging.set_verbosity(logging.INFO) transformers.utils.logging.set_verbosity(logging.INFO) # Log on each process the small summary: logger.info(f"Training/evaluation parameters {training_args}") # endregion # region Detecting last checkpoint last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # endregion # Set seed before initializing model. 
set_seed(training_args.seed) # region Load datasets # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files this script will use the first column for the full texts and the second column for the # summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # endregion # region Load model config and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) prefix = data_args.source_prefix if data_args.source_prefix is not None else "" # endregion # region Dataset preprocessing # We need to tokenize inputs and targets. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, and/or `do_eval`.") return column_names = raw_datasets["train"].column_names # For translation we set the codes of our source and target languages (only useful for mBART, the others will # ignore those attributes). if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)): assert data_args.target_lang is not None and data_args.source_lang is not None, ( f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and " "--target_lang arguments." ) tokenizer.src_lang = data_args.source_lang tokenizer.tgt_lang = data_args.target_lang forced_bos_token_id = ( tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None ) # Get the language codes for input/target. 
source_lang = data_args.source_lang.split("_")[0] target_lang = data_args.target_lang.split("_")[0] padding = "max_length" if data_args.pad_to_max_length else False # Temporarily set max_target_length for training. max_target_length = data_args.max_target_length padding = "max_length" if data_args.pad_to_max_length else False def preprocess_function(examples): inputs = [ex[source_lang] for ex in examples["translation"]] targets = [ex[target_lang] for ex in examples["translation"]] inputs = [prefix + inp for inp in inputs] model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True) # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) else: train_dataset = None if training_args.do_eval: max_target_length = data_args.val_max_target_length if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) else: eval_dataset = None # endregion with training_args.strategy.scope(): # region Prepare model model = TFAutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. 
embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)): model.config.forced_bos_token_id = forced_bos_token_id # endregion # region Set decoder_start_token_id if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): assert ( data_args.target_lang is not None and data_args.source_lang is not None ), "mBart requires --target_lang and --source_lang" if isinstance(tokenizer, MBartTokenizer): model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang] else: model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") # endregion # region Prepare TF Dataset objects label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=64, # Reduce the number of unique shapes for XLA, especially for generation return_tensors="tf", ) num_replicas = training_args.strategy.num_replicas_in_sync total_train_batch_size = training_args.per_device_train_batch_size * num_replicas total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas dataset_options = tf.data.Options() dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in # training. This is the recommended way to use a Hugging Face dataset when training with Keras. 
You can also # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names # yourself if you use this method, whereas they are automatically inferred from the model input names when # using model.prepare_tf_dataset() # For more info see the docs: # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset tf_train_dataset = model.prepare_tf_dataset( train_dataset, collate_fn=data_collator, batch_size=total_train_batch_size, shuffle=True, ).with_options(dataset_options) tf_eval_dataset = model.prepare_tf_dataset( eval_dataset, collate_fn=data_collator, batch_size=total_eval_batch_size, shuffle=False ).with_options(dataset_options) # endregion # region Optimizer and LR scheduling num_train_steps = int(len(tf_train_dataset) * training_args.num_train_epochs) if training_args.warmup_steps > 0: num_warmup_steps = training_args.warmup_steps elif training_args.warmup_ratio > 0: num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) else: num_warmup_steps = 0 if training_args.do_train: optimizer, lr_schedule = create_optimizer( init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm, ) else: optimizer = None # endregion # region Metric and postprocessing if training_args.do_eval: metric = evaluate.load("sacrebleu") if data_args.val_max_target_length is None: data_args.val_max_target_length = data_args.max_target_length gen_kwargs = { "max_length": data_args.val_max_target_length, "num_beams": data_args.num_beams, "no_repeat_ngram_size": 0, # Not supported under XLA right now, and some models set it by default } def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [[label.strip()] for label in labels] return preds, labels def compute_metrics(preds): predictions, labels = preds if isinstance(predictions, tuple): predictions = predictions[0] decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) metrics = metric.compute(predictions=decoded_preds, references=decoded_labels) return {"bleu": metrics["score"]} # The KerasMetricCallback allows metrics that are too complex to write as standard Keras metrics # to be computed each epoch. Any Python code can be included in the metric_fn. This is especially # useful for metrics like BLEU and ROUGE that perform string comparisons on decoded model outputs. 
# For more information, see the docs at # https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.KerasMetricCallback metric_callback = KerasMetricCallback( metric_fn=compute_metrics, eval_dataset=tf_eval_dataset, predict_with_generate=True, use_xla_generation=True, generate_kwargs=gen_kwargs, ) callbacks = [metric_callback] else: callbacks = [] # endregion # region Preparing push_to_hub and model card push_to_hub_model_id = training_args.push_to_hub_model_id model_name = model_args.model_name_or_path.split("/")[-1] if not push_to_hub_model_id: push_to_hub_model_id = f"{model_name}-finetuned-{data_args.source_lang}-{data_args.target_lang}" model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "translation"} if data_args.dataset_name is not None: model_card_kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: model_card_kwargs["dataset_args"] = data_args.dataset_config_name model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: model_card_kwargs["dataset"] = data_args.dataset_name languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None] if len(languages) > 0: model_card_kwargs["language"] = languages if training_args.push_to_hub: # Because this training can be quite long, we save once per epoch. callbacks.append( PushToHubCallback( output_dir=training_args.output_dir, model_id=push_to_hub_model_id, organization=training_args.push_to_hub_organization, token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs, ) ) # endregion # region Training eval_metrics = None model.compile(optimizer=optimizer, jit_compile=training_args.xla) if training_args.do_train: logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {training_args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size = {total_train_batch_size}") logger.info(f" Total optimization steps = {num_train_steps}") if training_args.xla and not data_args.pad_to_max_length: logger.warning( "XLA training may be slow at first when --pad_to_max_length is not set " "until all possible shapes have been compiled." 
) history = model.fit(tf_train_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks) eval_metrics = {key: val[-1] for key, val in history.history.items()} # endregion # region Validation if training_args.do_eval and not training_args.do_train: # Compiling generation with XLA yields enormous speedups, see https://huggingface.co/blog/tf-xla-generate @tf.function(jit_compile=True) def generate(**kwargs): return model.generate(**kwargs) if training_args.do_eval: logger.info("Evaluation...") for batch, labels in tf_eval_dataset: batch.update(gen_kwargs) generated_tokens = generate(**batch) if isinstance(generated_tokens, tuple): generated_tokens = generated_tokens[0] decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) metric.add_batch(predictions=decoded_preds, references=decoded_labels) eval_metrics = metric.compute() logger.info({"bleu": eval_metrics["score"]}) # endregion if training_args.output_dir is not None and eval_metrics is not None: output_eval_file = os.path.join(training_args.output_dir, "all_results.json") with open(output_eval_file, "w") as writer: writer.write(json.dumps(eval_metrics)) if training_args.output_dir is not None and not training_args.push_to_hub: # If we're not pushing to hub, at least save a local copy when we're done model.save_pretrained(training_args.output_dir) if __name__ == "__main__": main()
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for translation. """ # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments. import json import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import tensorflow as tf from datasets import load_dataset import evaluate import transformers from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForSeq2Seq, HfArgumentParser, KerasMetricCallback, M2M100Tokenizer, MBart50Tokenizer, MBart50TokenizerFast, MBartTokenizer, MBartTokenizerFast, PushToHubCallback, TFAutoModelForSeq2SeqLM, TFTrainingArguments, create_optimizer, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # region Dependencies and constants # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.25.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") logger = logging.getLogger(__name__) MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast, M2M100Tokenizer] # endregion # region Arguments @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" source_lang: str = field(default=None, metadata={"help": "Source language id for translation."}) target_lang: str = field(default=None, metadata={"help": "Target language id for translation."}) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} ) validation_file: Optional[str] = field( default=None, metadata={ "help": ( "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." ) }, ) test_file: Optional[str] = field( default=None, metadata={ "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_source_length: Optional[int] = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_target_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) val_max_target_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`." "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) num_beams: Optional[int] = field( default=None, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " "which is used during ``evaluate`` and ``predict``." ) }, ) ignore_pad_token_for_loss: bool = field( default=True, metadata={ "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." 
}, ) source_prefix: Optional[str] = field( default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."} ) forced_bos_token: Optional[str] = field( default=None, metadata={ "help": ( "The token to force as the first generated token after the :obj:`decoder_start_token_id`.Useful for" " multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token needs to" " be the target language token.(Usually it is the target language token)" ) }, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if self.val_max_target_length is None: self.val_max_target_length = self.max_target_length # endregion def main(): # region Argument parsing # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_translation", model_args, data_args, framework="tensorflow") # endregion # region Logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO) datasets.utils.logging.set_verbosity(logging.INFO) transformers.utils.logging.set_verbosity(logging.INFO) # Log on each process the small summary: logger.info(f"Training/evaluation parameters {training_args}") # endregion # region Detecting last checkpoint last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # endregion # Set seed before initializing model. 
set_seed(training_args.seed) # region Load datasets # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files this script will use the first column for the full texts and the second column for the # summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # endregion # region Load model config and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) prefix = data_args.source_prefix if data_args.source_prefix is not None else "" # endregion # region Dataset preprocessing # We need to tokenize inputs and targets. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, and/or `do_eval`.") return column_names = raw_datasets["train"].column_names # For translation we set the codes of our source and target languages (only useful for mBART, the others will # ignore those attributes). if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)): assert data_args.target_lang is not None and data_args.source_lang is not None, ( f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and " "--target_lang arguments." ) tokenizer.src_lang = data_args.source_lang tokenizer.tgt_lang = data_args.target_lang forced_bos_token_id = ( tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None ) # Get the language codes for input/target. 
source_lang = data_args.source_lang.split("_")[0] target_lang = data_args.target_lang.split("_")[0] padding = "max_length" if data_args.pad_to_max_length else False # Temporarily set max_target_length for training. max_target_length = data_args.max_target_length padding = "max_length" if data_args.pad_to_max_length else False def preprocess_function(examples): inputs = [ex[source_lang] for ex in examples["translation"]] targets = [ex[target_lang] for ex in examples["translation"]] inputs = [prefix + inp for inp in inputs] model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True) # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) else: train_dataset = None if training_args.do_eval: max_target_length = data_args.val_max_target_length if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) else: eval_dataset = None # endregion with training_args.strategy.scope(): # region Prepare model model = TFAutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. 
embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)): model.config.forced_bos_token_id = forced_bos_token_id # endregion # region Set decoder_start_token_id if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): assert ( data_args.target_lang is not None and data_args.source_lang is not None ), "mBart requires --target_lang and --source_lang" if isinstance(tokenizer, MBartTokenizer): model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang] else: model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") # endregion # region Prepare TF Dataset objects label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=64, # Reduce the number of unique shapes for XLA, especially for generation return_tensors="tf", ) num_replicas = training_args.strategy.num_replicas_in_sync total_train_batch_size = training_args.per_device_train_batch_size * num_replicas total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas dataset_options = tf.data.Options() dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in # training. This is the recommended way to use a Hugging Face dataset when training with Keras. 
You can also # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names # yourself if you use this method, whereas they are automatically inferred from the model input names when # using model.prepare_tf_dataset() # For more info see the docs: # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset tf_train_dataset = model.prepare_tf_dataset( train_dataset, collate_fn=data_collator, batch_size=total_train_batch_size, shuffle=True, ).with_options(dataset_options) tf_eval_dataset = model.prepare_tf_dataset( eval_dataset, collate_fn=data_collator, batch_size=total_eval_batch_size, shuffle=False ).with_options(dataset_options) # endregion # region Optimizer and LR scheduling num_train_steps = int(len(tf_train_dataset) * training_args.num_train_epochs) if training_args.warmup_steps > 0: num_warmup_steps = training_args.warmup_steps elif training_args.warmup_ratio > 0: num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) else: num_warmup_steps = 0 if training_args.do_train: optimizer, lr_schedule = create_optimizer( init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm, ) else: optimizer = None # endregion # region Metric and postprocessing if training_args.do_eval: metric = evaluate.load("sacrebleu") if data_args.val_max_target_length is None: data_args.val_max_target_length = data_args.max_target_length gen_kwargs = { "max_length": data_args.val_max_target_length, "num_beams": data_args.num_beams, "no_repeat_ngram_size": 0, # Not supported under XLA right now, and some models set it by default } def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [[label.strip()] for label in labels] return preds, labels def compute_metrics(preds): predictions, labels = preds if isinstance(predictions, tuple): predictions = predictions[0] decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) metrics = metric.compute(predictions=decoded_preds, references=decoded_labels) return {"bleu": metrics["score"]} # The KerasMetricCallback allows metrics that are too complex to write as standard Keras metrics # to be computed each epoch. Any Python code can be included in the metric_fn. This is especially # useful for metrics like BLEU and ROUGE that perform string comparisons on decoded model outputs. 
# For more information, see the docs at # https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.KerasMetricCallback metric_callback = KerasMetricCallback( metric_fn=compute_metrics, eval_dataset=tf_eval_dataset, predict_with_generate=True, use_xla_generation=True, generate_kwargs=gen_kwargs, ) callbacks = [metric_callback] else: callbacks = [] # endregion # region Preparing push_to_hub and model card push_to_hub_model_id = training_args.push_to_hub_model_id model_name = model_args.model_name_or_path.split("/")[-1] if not push_to_hub_model_id: push_to_hub_model_id = f"{model_name}-finetuned-{data_args.source_lang}-{data_args.target_lang}" model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "translation"} if data_args.dataset_name is not None: model_card_kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: model_card_kwargs["dataset_args"] = data_args.dataset_config_name model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: model_card_kwargs["dataset"] = data_args.dataset_name languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None] if len(languages) > 0: model_card_kwargs["language"] = languages if training_args.push_to_hub: # Because this training can be quite long, we save once per epoch. callbacks.append( PushToHubCallback( output_dir=training_args.output_dir, model_id=push_to_hub_model_id, organization=training_args.push_to_hub_organization, token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs, ) ) # endregion # region Training eval_metrics = None model.compile(optimizer=optimizer, jit_compile=training_args.xla) if training_args.do_train: logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {training_args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size = {total_train_batch_size}") logger.info(f" Total optimization steps = {num_train_steps}") if training_args.xla and not data_args.pad_to_max_length: logger.warning( "XLA training may be slow at first when --pad_to_max_length is not set " "until all possible shapes have been compiled." 
) history = model.fit(tf_train_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks) eval_metrics = {key: val[-1] for key, val in history.history.items()} # endregion # region Validation if training_args.do_eval and not training_args.do_train: # Compiling generation with XLA yields enormous speedups, see https://huggingface.co/blog/tf-xla-generate @tf.function(jit_compile=True) def generate(**kwargs): return model.generate(**kwargs) if training_args.do_eval: logger.info("Evaluation...") for batch, labels in tf_eval_dataset: batch.update(gen_kwargs) generated_tokens = generate(**batch) if isinstance(generated_tokens, tuple): generated_tokens = generated_tokens[0] decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) metric.add_batch(predictions=decoded_preds, references=decoded_labels) eval_metrics = metric.compute() logger.info({"bleu": eval_metrics["score"]}) # endregion if training_args.output_dir is not None and eval_metrics is not None: output_eval_file = os.path.join(training_args.output_dir, "all_results.json") with open(output_eval_file, "w") as writer: writer.write(json.dumps(eval_metrics)) if training_args.output_dir is not None and not training_args.push_to_hub: # If we're not pushing to hub, at least save a local copy when we're done model.save_pretrained(training_args.output_dir) if __name__ == "__main__": main()
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
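The query above repeats the PR description. For readers unfamiliar with the bug, here is a minimal hedged sketch of the kind of change it describes; the function name `rescale_boxes`, the argument shapes, and the final broadcasting step are assumptions made for this illustration, not the actual patch applied in `transformers`.

```python
import torch
from typing import List


def rescale_boxes(boxes: torch.Tensor, target_sizes) -> torch.Tensor:
    """Illustrative only: scale normalized boxes back to image size on the right device."""
    if isinstance(target_sizes, List):
        # These tensors are created on CPU, regardless of where `boxes` lives.
        img_h = torch.Tensor([i[0] for i in target_sizes])
        img_w = torch.Tensor([i[1] for i in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)
    # The point of the fix described above: move the scale factor onto the device
    # of `boxes`, otherwise multiplying a CUDA tensor by a CPU tensor raises an error.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
    return boxes * scale_fct[:, None, :]
```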
./src/transformers/models/mobilebert/tokenization_mobilebert.py
# coding=utf-8 # # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for MobileBERT.""" import collections import os import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"} } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512} PRETRAINED_INIT_CONFIGURATION = {} # Copied from transformers.models.bert.tokenization_bert.load_vocab def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with BERT->MobileBERT,Bert->MobileBert class MobileBertTokenizer(PreTrainedTokenizer): r""" Construct a MobileBERT tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). 
It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original MobileBERT). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs ): super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = MobileBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by 
concatenating and adding special tokens. A MobileBERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A MobileBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). 
Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. 
""" output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
# coding=utf-8 # # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for MobileBERT.""" import collections import os import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"} } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512} PRETRAINED_INIT_CONFIGURATION = {} # Copied from transformers.models.bert.tokenization_bert.load_vocab def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with BERT->MobileBERT,Bert->MobileBert class MobileBertTokenizer(PreTrainedTokenizer): r""" Construct a MobileBERT tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). 
It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original MobileBERT). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs ): super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = MobileBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by 
concatenating and adding special tokens. A MobileBERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A MobileBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). 
Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. 
""" output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
-1
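The `WordpieceTokenizer` docstring in the tokenizer file above describes a greedy longest-match-first algorithm. The following self-contained sketch runs that class against a toy vocabulary; the vocabulary is invented for the example (it is not a real MobileBERT vocab), and the import path is taken from the filepath shown above.

```python
from transformers.models.mobilebert.tokenization_mobilebert import WordpieceTokenizer

# Toy vocabulary, invented for this illustration.
toy_vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3}
wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wordpiece.tokenize("xyz"))        # ['[UNK]'] -- no segmentation exists in the toy vocab
```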
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/models/mobilevit/test_feature_extraction_mobilevit.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTFeatureExtractor class MobileViTFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ): size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_flip_channel_order = do_flip_channel_order def prepare_feat_extract_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class MobileViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = MobileViTFeatureExtractor if is_vision_available() else None def setUp(self): self.feature_extract_tester = MobileViTFeatureExtractionTester(self) @property def feat_extract_dict(self): return self.feature_extract_tester.prepare_feat_extract_dict() def test_feat_extract_properties(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(feature_extractor, "do_resize")) self.assertTrue(hasattr(feature_extractor, "size")) self.assertTrue(hasattr(feature_extractor, "do_center_crop")) self.assertTrue(hasattr(feature_extractor, "center_crop")) self.assertTrue(hasattr(feature_extractor, "do_flip_channel_order")) def test_batch_feature(self): pass def test_call_pil(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random PIL images image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( 
encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), ) def test_call_numpy(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), ) def test_call_pytorch(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), )
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTFeatureExtractor class MobileViTFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ): size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_flip_channel_order = do_flip_channel_order def prepare_feat_extract_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class MobileViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = MobileViTFeatureExtractor if is_vision_available() else None def setUp(self): self.feature_extract_tester = MobileViTFeatureExtractionTester(self) @property def feat_extract_dict(self): return self.feature_extract_tester.prepare_feat_extract_dict() def test_feat_extract_properties(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(feature_extractor, "do_resize")) self.assertTrue(hasattr(feature_extractor, "size")) self.assertTrue(hasattr(feature_extractor, "do_center_crop")) self.assertTrue(hasattr(feature_extractor, "center_crop")) self.assertTrue(hasattr(feature_extractor, "do_flip_channel_order")) def test_batch_feature(self): pass def test_call_pil(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random PIL images image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( 
encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), ) def test_call_numpy(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), ) def test_call_pytorch(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), ) # Test batched encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size["height"], self.feature_extract_tester.crop_size["width"], ), )
-1
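The MobileViT feature-extraction tests above all check the same contract: regardless of whether the inputs are PIL images, NumPy arrays, or PyTorch tensors, the extractor returns `pixel_values` of shape `(batch, num_channels, crop_height, crop_width)`. Below is a minimal usage sketch of that contract outside the test harness; the image resolutions and the `size`/`crop_size` values are illustrative assumptions, not values taken from the test suite or a released checkpoint.

```
import numpy as np
from PIL import Image
from transformers import MobileViTFeatureExtractor

# Two random RGB images of different resolutions (illustrative sizes).
images = [
    Image.fromarray(np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8)),
    Image.fromarray(np.random.randint(0, 256, (180, 200, 3), dtype=np.uint8)),
]

# Resize the shortest edge, then center-crop, mirroring the options the tests pass
# through `prepare_feat_extract_dict` (the concrete numbers here are assumptions).
feature_extractor = MobileViTFeatureExtractor(
    do_resize=True,
    size={"shortest_edge": 288},
    do_center_crop=True,
    crop_size={"height": 256, "width": 256},
)

encoding = feature_extractor(images, return_tensors="pt")
# Expected: (batch, channels, crop_height, crop_width) == (2, 3, 256, 256)
print(encoding.pixel_values.shape)
```

Feeding the same images as NumPy arrays or torch tensors should produce an identical shape, which is exactly what `test_call_numpy` and `test_call_pytorch` assert.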
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/models/resnet/__init__.py
-1
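The PR description above points at post-processing code where `scale_fct` is assembled from `target_sizes`: when `target_sizes` is a plain Python list, `torch.Tensor([...])` always allocates on the CPU, so the resulting scale factors can end up on a different device than the model's predictions. Below is a minimal sketch of the failure pattern and the fix; the `rescale_boxes` helper and the `out_bbox` tensor of shape `(batch, num_queries, 4)` are illustrative assumptions, and only the `isinstance` block is taken verbatim from the description.

```
from typing import List

import torch


def rescale_boxes(out_bbox: torch.Tensor, target_sizes) -> torch.Tensor:
    # target_sizes may be a tensor of shape (batch, 2) or a plain list of (height, width) pairs.
    if isinstance(target_sizes, List):
        img_h = torch.Tensor([i[0] for i in target_sizes])  # created on CPU
        img_w = torch.Tensor([i[1] for i in target_sizes])  # created on CPU
    else:
        img_h, img_w = target_sizes.unbind(1)

    # One (w, h, w, h) row of scale factors per image.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
    # The fix described above: move the scale factors onto the same device as the
    # predictions, otherwise boxes on GPU cannot be multiplied by a CPU tensor.
    scale_fct = scale_fct.to(out_bbox.device)
    return out_bbox * scale_fct[:, None, :]
```

With the model running on GPU and `target_sizes` passed as a list, the `.to(out_bbox.device)` call is what prevents a device-mismatch error during the multiplication.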
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/bert/tokenization_bert.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Bert.""" import collections import os import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt", "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt", "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt", "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt", "bert-base-multilingual-uncased": ( "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt" ), "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt", "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt", "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt", "bert-large-uncased-whole-word-masking": ( "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt" ), "bert-large-cased-whole-word-masking": ( "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt" ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt" ), "bert-large-cased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt" ), "bert-base-cased-finetuned-mrpc": ( "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt" ), "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt", "bert-base-german-dbmdz-uncased": ( "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt" ), "TurkuNLP/bert-base-finnish-cased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt" ), "TurkuNLP/bert-base-finnish-uncased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt" ), "wietsedv/bert-base-dutch-cased": ( "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "bert-base-uncased": 512, "bert-large-uncased": 512, "bert-base-cased": 512, "bert-large-cased": 512, "bert-base-multilingual-uncased": 512, "bert-base-multilingual-cased": 512, "bert-base-chinese": 512, "bert-base-german-cased": 512, "bert-large-uncased-whole-word-masking": 512, "bert-large-cased-whole-word-masking": 512, "bert-large-uncased-whole-word-masking-finetuned-squad": 512, 
"bert-large-cased-whole-word-masking-finetuned-squad": 512, "bert-base-cased-finetuned-mrpc": 512, "bert-base-german-dbmdz-cased": 512, "bert-base-german-dbmdz-uncased": 512, "TurkuNLP/bert-base-finnish-cased-v1": 512, "TurkuNLP/bert-base-finnish-uncased-v1": 512, "wietsedv/bert-base-dutch-cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "bert-base-uncased": {"do_lower_case": True}, "bert-large-uncased": {"do_lower_case": True}, "bert-base-cased": {"do_lower_case": False}, "bert-large-cased": {"do_lower_case": False}, "bert-base-multilingual-uncased": {"do_lower_case": True}, "bert-base-multilingual-cased": {"do_lower_case": False}, "bert-base-chinese": {"do_lower_case": False}, "bert-base-german-cased": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking": {"do_lower_case": True}, "bert-large-cased-whole-word-masking": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True}, "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False}, "bert-base-cased-finetuned-mrpc": {"do_lower_case": False}, "bert-base-german-dbmdz-cased": {"do_lower_case": False}, "bert-base-german-dbmdz-uncased": {"do_lower_case": True}, "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False}, "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True}, "wietsedv/bert-base-dutch-cased": {"do_lower_case": False}, } def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer(PreTrainedTokenizer): r""" Construct a BERT tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. 
mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs ): super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. 
never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. 
""" output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Bert.""" import collections import os import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt", "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt", "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt", "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt", "bert-base-multilingual-uncased": ( "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt" ), "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt", "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt", "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt", "bert-large-uncased-whole-word-masking": ( "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt" ), "bert-large-cased-whole-word-masking": ( "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt" ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt" ), "bert-large-cased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt" ), "bert-base-cased-finetuned-mrpc": ( "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt" ), "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt", "bert-base-german-dbmdz-uncased": ( "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt" ), "TurkuNLP/bert-base-finnish-cased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt" ), "TurkuNLP/bert-base-finnish-uncased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt" ), "wietsedv/bert-base-dutch-cased": ( "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "bert-base-uncased": 512, "bert-large-uncased": 512, "bert-base-cased": 512, "bert-large-cased": 512, "bert-base-multilingual-uncased": 512, "bert-base-multilingual-cased": 512, "bert-base-chinese": 512, "bert-base-german-cased": 512, "bert-large-uncased-whole-word-masking": 512, "bert-large-cased-whole-word-masking": 512, "bert-large-uncased-whole-word-masking-finetuned-squad": 512, 
"bert-large-cased-whole-word-masking-finetuned-squad": 512, "bert-base-cased-finetuned-mrpc": 512, "bert-base-german-dbmdz-cased": 512, "bert-base-german-dbmdz-uncased": 512, "TurkuNLP/bert-base-finnish-cased-v1": 512, "TurkuNLP/bert-base-finnish-uncased-v1": 512, "wietsedv/bert-base-dutch-cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "bert-base-uncased": {"do_lower_case": True}, "bert-large-uncased": {"do_lower_case": True}, "bert-base-cased": {"do_lower_case": False}, "bert-large-cased": {"do_lower_case": False}, "bert-base-multilingual-uncased": {"do_lower_case": True}, "bert-base-multilingual-cased": {"do_lower_case": False}, "bert-base-chinese": {"do_lower_case": False}, "bert-base-german-cased": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking": {"do_lower_case": True}, "bert-large-cased-whole-word-masking": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True}, "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False}, "bert-base-cased-finetuned-mrpc": {"do_lower_case": False}, "bert-base-german-dbmdz-cased": {"do_lower_case": False}, "bert-base-german-dbmdz-uncased": {"do_lower_case": True}, "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False}, "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True}, "wietsedv/bert-base-dutch-cased": {"do_lower_case": False}, } def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer(PreTrainedTokenizer): r""" Construct a BERT tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. 
mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs ): super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. 
never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. 
""" output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
-1
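The `WordpieceTokenizer.tokenize` docstring above describes a greedy longest-match-first algorithm (for example, `"unaffable"` is split into `["un", "##aff", "##able"]`). Below is a self-contained sketch of that loop with a toy vocabulary; the vocabulary is an illustrative assumption, not one of the published BERT vocab files.

```
def greedy_wordpiece(token: str, vocab: set, unk_token: str = "[UNK]") -> list:
    """Greedy longest-match-first split of a single token into word pieces."""
    pieces, start = [], 0
    while start < len(token):
        end = len(token)
        cur = None
        # Shrink the candidate substring from the right until it is in the vocab.
        while start < end:
            sub = token[start:end]
            if start > 0:
                sub = "##" + sub  # non-initial pieces carry the '##' prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk_token]  # no piece matched: the whole token maps to UNK
        pieces.append(cur)
        start = end
    return pieces


vocab = {"un", "##aff", "##able", "runn", "##ing"}
print(greedy_wordpiece("unaffable", vocab))  # ['un', '##aff', '##able']
print(greedy_wordpiece("running", vocab))    # ['runn', '##ing']
print(greedy_wordpiece("xyz", vocab))        # ['[UNK]']
```

As in the class above, a token that cannot be fully covered by vocabulary pieces collapses to the unknown token instead of being partially split.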
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/models/rag/test_retrieval_rag.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class RagRetrieverTest(TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() self.retrieval_vector_size = 8 # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer: return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, 
"bart_tokenizer")) def tearDown(self): shutil.rmtree(self.tmpdirname) def get_dummy_dataset(self): dataset = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)], } ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) return dataset def get_dummy_canonical_hf_index_retriever(self): dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), ) with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = dataset retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) return retriever def get_dummy_custom_hf_index_retriever(self, from_disk: bool): dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", ) if from_disk: config.passages_path = os.path.join(self.tmpdirname, "dataset") config.index_path = os.path.join(self.tmpdirname, "index.faiss") dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss")) dataset.drop_index("embeddings") dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset")) del dataset retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) else: retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), ) return retriever def get_dummy_legacy_index_retriever(self): dataset = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)], } ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index") dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr") pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb")) passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl") passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(passages, open(passages_file_name, "wb")) config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname, ) retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer() ) return retriever def test_canonical_hf_index_retriever_retrieve(self): n_docs = 1 retriever = self.get_dummy_canonical_hf_index_retriever() hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", 
"title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_canonical_hf_index_retriever_save_and_from_pretrained(self): retriever = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = self.get_dummy_dataset() retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) def test_custom_hf_index_retriever_retrieve(self): n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_custom_hf_index_retriever_save_and_from_pretrained(self): retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) def test_custom_hf_index_retriever_retrieve_from_disk(self): n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self): retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( 
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) def test_legacy_index_retriever_retrieve(self): n_docs = 1 retriever = self.get_dummy_legacy_index_retriever() hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["text", "title"]) self.assertEqual(len(doc_dicts[0]["text"]), n_docs) self.assertEqual(doc_dicts[0]["text"][0], "bar") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0], "foo") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_legacy_hf_index_retriever_save_and_from_pretrained(self): retriever = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) @require_torch @require_tokenizers @require_sentencepiece def test_hf_index_retriever_call(self): import torch n_docs = 1 retriever = self.get_dummy_canonical_hf_index_retriever() question_input_ids = [[5, 7], [10, 11]] hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(context_input_ids, list) self.assertIsInstance(context_attention_mask, list) self.assertIsInstance(retrieved_doc_embeds, np.ndarray) out = retriever( question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt", ) context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(context_input_ids, torch.Tensor) self.assertIsInstance(context_attention_mask, torch.Tensor) self.assertIsInstance(retrieved_doc_embeds, torch.Tensor) @require_torch @require_tokenizers @require_sentencepiece def test_custom_hf_index_end2end_retriever_call(self): context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer() n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer) question_input_ids = [[5, 7], [10, 11]] hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs) self.assertEqual( len(out), 6 ) # 
check whether the retriever output consists of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True ) # check for doc token related keys in dictionary.
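The retrieval assertions above ("max inner product is reached with second doc") follow directly from the dummy embeddings: the all-ones query scores highest against the `2 * np.ones(...)` document, while the all-minus-ones query prefers the all-ones document. Below is a NumPy-only sketch of the same inner-product ranking without the FAISS index; the dimension of 8 mirrors `retrieval_vector_size` in the tests, and everything else is illustrative.

```
import numpy as np

d = 8  # retrieval_vector_size used by the dummy dataset above
doc_embeddings = np.stack([np.ones(d), 2 * np.ones(d)])           # docs "0" and "1"
queries = np.stack([np.ones(d), -np.ones(d)]).astype(np.float32)  # the two hidden states

scores = queries @ doc_embeddings.T  # inner products, shape (2 queries, 2 docs)
best = scores.argmax(axis=1)
print(scores)  # [[8. 16.], [-8. -16.]]
print(best)    # [1 0]: ones query retrieves doc "1", minus-ones query retrieves doc "0"
```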
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class RagRetrieverTest(TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() self.retrieval_vector_size = 8 # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer: return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, 
"bart_tokenizer")) def tearDown(self): shutil.rmtree(self.tmpdirname) def get_dummy_dataset(self): dataset = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)], } ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) return dataset def get_dummy_canonical_hf_index_retriever(self): dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), ) with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = dataset retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) return retriever def get_dummy_custom_hf_index_retriever(self, from_disk: bool): dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", ) if from_disk: config.passages_path = os.path.join(self.tmpdirname, "dataset") config.index_path = os.path.join(self.tmpdirname, "index.faiss") dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss")) dataset.drop_index("embeddings") dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset")) del dataset retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) else: retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), ) return retriever def get_dummy_legacy_index_retriever(self): dataset = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)], } ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index") dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr") pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb")) passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl") passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(passages, open(passages_file_name, "wb")) config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname, ) retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer() ) return retriever def test_canonical_hf_index_retriever_retrieve(self): n_docs = 1 retriever = self.get_dummy_canonical_hf_index_retriever() hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", 
"title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_canonical_hf_index_retriever_save_and_from_pretrained(self): retriever = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = self.get_dummy_dataset() retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) def test_custom_hf_index_retriever_retrieve(self): n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_custom_hf_index_retriever_save_and_from_pretrained(self): retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) def test_custom_hf_index_retriever_retrieve_from_disk(self): n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self): retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( 
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) def test_legacy_index_retriever_retrieve(self): n_docs = 1 retriever = self.get_dummy_legacy_index_retriever() hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["text", "title"]) self.assertEqual(len(doc_dicts[0]["text"]), n_docs) self.assertEqual(doc_dicts[0]["text"][0], "bar") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0], "foo") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_legacy_hf_index_retriever_save_and_from_pretrained(self): retriever = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) @require_torch @require_tokenizers @require_sentencepiece def test_hf_index_retriever_call(self): import torch n_docs = 1 retriever = self.get_dummy_canonical_hf_index_retriever() question_input_ids = [[5, 7], [10, 11]] hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(context_input_ids, list) self.assertIsInstance(context_attention_mask, list) self.assertIsInstance(retrieved_doc_embeds, np.ndarray) out = retriever( question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt", ) context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(context_input_ids, torch.Tensor) self.assertIsInstance(context_attention_mask, torch.Tensor) self.assertIsInstance(retrieved_doc_embeds, torch.Tensor) @require_torch @require_tokenizers @require_sentencepiece def test_custom_hf_index_end2end_retriever_call(self): context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer() n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer) question_input_ids = [[5, 7], [10, 11]] hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs) self.assertEqual( len(out), 6 ) # 
check whether the retriever output consists of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True ) # check for doc token related keys in dictionary.
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
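The description above pinpoints why `scale_fct` ends up on `cpu`: the height/width tensors are built from a plain Python list, so everything derived from them stays on the CPU even when the model outputs live on a GPU. The sketch below is a rough, hedged illustration of the kind of fix being described, not the actual diff; the function name `rescale_boxes`, its arguments, and the DETR-style box rescaling math are assumptions for illustration only.

```python
from typing import List

import torch


def rescale_boxes(out_bbox: torch.Tensor, target_sizes) -> torch.Tensor:
    """Hypothetical post-processing helper sketching the device fix described above."""
    if isinstance(target_sizes, List):
        # Tensors built from a plain Python list are created on the CPU by default.
        img_h = torch.Tensor([i[0] for i in target_sizes])
        img_w = torch.Tensor([i[1] for i in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)
    # Move the scaling factors to the device of the model output; without the
    # `.to(...)`, multiplying a CUDA tensor by a CPU tensor raises a device mismatch error.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(out_bbox.device)
    return out_bbox * scale_fct[:, None, :]
```

Resolving the target device from `out_bbox.device` rather than hard-coding one keeps such a helper correct whether inference runs on CPU or GPU.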
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/models/mobilevit/test_modeling_mobilevit.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch MobileViT model. """ import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTFeatureExtractor class MobileViTConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "neck_hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) class MobileViTModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.last_hidden_size = last_hidden_size self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.conv_kernel_size = conv_kernel_size self.output_stride = output_stride self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = MobileViTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileViTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileViTForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MobileViTModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = MobileViTModelTester(self) self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileViT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileViT does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="MobileViT does not output attentions") def test_attention_outputs(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 5 self.assertEqual(len(hidden_states), expected_num_stages) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
divisor = 2 for i in range(len(hidden_states)): self.assertListEqual( list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = MobileViTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class MobileViTModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return MobileViTFeatureExtractor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_semantic_segmentation(self): model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") model = model.to(torch_device) feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 21, 32, 32)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4)) @slow def test_post_processing_semantic_segmentation(self): model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") model = model.to(torch_device) feature_extractor = 
MobileViTFeatureExtractor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) outputs.logits = outputs.logits.detach().cpu() segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)]) expected_shape = torch.Size((50, 60)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((32, 32)) self.assertEqual(segmentation[0].shape, expected_shape)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch MobileViT model. """ import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTFeatureExtractor class MobileViTConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "neck_hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) class MobileViTModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.last_hidden_size = last_hidden_size self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.conv_kernel_size = conv_kernel_size self.output_stride = output_stride self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = MobileViTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileViTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileViTForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MobileViTModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = MobileViTModelTester(self) self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileViT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileViT does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="MobileViT does not output attentions") def test_attention_outputs(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 5 self.assertEqual(len(hidden_states), expected_num_stages) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
divisor = 2 for i in range(len(hidden_states)): self.assertListEqual( list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = MobileViTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class MobileViTModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return MobileViTFeatureExtractor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_semantic_segmentation(self): model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") model = model.to(torch_device) feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 21, 32, 32)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4)) @slow def test_post_processing_semantic_segmentation(self): model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") model = model.to(torch_device) feature_extractor = 
MobileViTFeatureExtractor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) outputs.logits = outputs.logits.detach().cpu() segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)]) expected_shape = torch.Size((50, 60)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((32, 32)) self.assertEqual(segmentation[0].shape, expected_shape)
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/wav2vec2_conformer/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_wav2vec2_conformer": [ "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2ConformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_wav2vec2_conformer"] = [ "WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ConformerForAudioFrameClassification", "Wav2Vec2ConformerForCTC", "Wav2Vec2ConformerForPreTraining", "Wav2Vec2ConformerForSequenceClassification", "Wav2Vec2ConformerForXVector", "Wav2Vec2ConformerModel", "Wav2Vec2ConformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_wav2vec2_conformer import ( WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2ConformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wav2vec2_conformer import ( WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForCTC, Wav2Vec2ConformerForPreTraining, Wav2Vec2ConformerForSequenceClassification, Wav2Vec2ConformerForXVector, Wav2Vec2ConformerModel, Wav2Vec2ConformerPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_wav2vec2_conformer": [ "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2ConformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_wav2vec2_conformer"] = [ "WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ConformerForAudioFrameClassification", "Wav2Vec2ConformerForCTC", "Wav2Vec2ConformerForPreTraining", "Wav2Vec2ConformerForSequenceClassification", "Wav2Vec2ConformerForXVector", "Wav2Vec2ConformerModel", "Wav2Vec2ConformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_wav2vec2_conformer import ( WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2ConformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wav2vec2_conformer import ( WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForCTC, Wav2Vec2ConformerForPreTraining, Wav2Vec2ConformerForSequenceClassification, Wav2Vec2ConformerForXVector, Wav2Vec2ConformerModel, Wav2Vec2ConformerPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/fnet/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_fnet"] = ["FNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_fnet"] = [ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", "FNetForPreTraining", "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_fnet"] = ["FNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_fnet"] = [ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", "FNetForPreTraining", "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/layoutxlm/tokenization_layoutxlm.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for LayoutXLM model.""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import ( BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging from ..xlm_roberta.tokenization_xlm_roberta import ( PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_VOCAB_FILES_MAP, SPIECE_UNDERLINE, VOCAB_FILES_NAMES, ) logger = logging.get_logger(__name__) LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **bbox** -- List of bounding boxes to be fed to a model. - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified). - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). 
- **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`). """ class LayoutXLMTokenizer(PreTrainedTokenizer): """ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [CLS] token. sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. 
The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs ) -> None: # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab self.fairseq_offset = 1 self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. 
Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." 
) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) batch_outputs = self._batch_prepare_for_model( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_text_or_text_pairs, is_pair: bool = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """ batch_outputs = {} for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)): batch_text_or_text_pair, boxes_example = example outputs = self.prepare_for_model( batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: 
Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. " "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) return self.prepare_for_model( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING) def prepare_for_model( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs ) -> BatchEncoding: """ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into token-level `labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) tokens = [] pair_tokens = [] token_boxes = [] pair_token_boxes = [] labels = [] if text_pair is None: if word_labels is None: # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference) for word, box in zip(text, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) else: # CASE 2: token classification (training) for word, box, label in zip(text, boxes, word_labels): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) if self.only_label_first_subword: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) else: labels.extend([label] * len(word_tokens)) else: # CASE 3: document visual question answering (inference) # text = question # text_pair = words tokens = self.tokenize(text) token_boxes = [self.pad_token_box for _ in range(len(tokens))] + [self.sep_token_box] for word, box in zip(text_pair, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) pair_tokens.extend(word_tokens) pair_token_boxes.extend([box] * len(word_tokens)) # Create ids + pair_ids ids = self.convert_tokens_to_ids(tokens) pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None # Compute the total size of the returned encodings pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) = self.truncate_sequences( ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." 
) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes encoded_inputs["overflowing_labels"] = overflowing_labels encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box] if pair_token_boxes: pair_token_boxes = pair_token_boxes + [self.sep_token_box] if labels: labels = [self.pad_token_label] + labels + [self.pad_token_label] else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) # Build output dictionary encoded_inputs["input_ids"] = sequence encoded_inputs["bbox"] = token_boxes + pair_token_boxes if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) if labels: encoded_inputs["labels"] = labels # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def truncate_sequences( self, ids: List[int], token_boxes: List[List[int]], pair_ids: Optional[List[int]] = None, pair_token_boxes: Optional[List[List[int]]] = None, labels: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. token_boxes (`List[List[int]]`): Bounding boxes of the first sequence. pair_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_token_boxes (`List[List[int]]`, *optional*): Bounding boxes of the second sequence. labels (`List[int]`, *optional*): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (`int`, *optional*, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): The strategy to follow for truncation. 
Can be: - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (`int`, *optional*, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of overflowing tokens. """ if num_tokens_to_remove <= 0: return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy == TruncationStrategy.LONGEST_FIRST: for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): if not overflowing_tokens: window_len = min(len(ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(ids[-window_len:]) overflowing_token_boxes.extend(token_boxes[-window_len:]) overflowing_labels.extend(labels[-window_len:]) ids = ids[:-1] token_boxes = token_boxes[:-1] labels = labels[:-1] else: if not overflowing_tokens: window_len = min(len(pair_ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(pair_ids[-window_len:]) overflowing_token_boxes.extend(pair_token_boxes[-window_len:]) pair_ids = pair_ids[:-1] pair_token_boxes = pair_token_boxes[:-1] elif truncation_strategy == TruncationStrategy.ONLY_FIRST: if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] overflowing_token_boxes = token_boxes[-window_len:] overflowing_labels = labels[-window_len:] ids = ids[:-num_tokens_to_remove] token_boxes = token_boxes[:-num_tokens_to_remove] labels = labels[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the first sequence has a length {len(ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " "for instance 'longest_first' or 'only_second'." 
                )
        elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
            if len(pair_ids) > num_tokens_to_remove:
                window_len = min(len(pair_ids), stride + num_tokens_to_remove)
                overflowing_tokens = pair_ids[-window_len:]
                overflowing_token_boxes = pair_token_boxes[-window_len:]
                pair_ids = pair_ids[:-num_tokens_to_remove]
                pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
            else:
                logger.error(
                    f"We need to remove {num_tokens_to_remove} to truncate the input "
                    f"but the second sequence has a length {len(pair_ids)}. "
                    f"Please select another truncation strategy than {truncation_strategy}, "
                    "for instance 'longest_first' or 'only_first'."
                )

        return (
            ids,
            token_boxes,
            pair_ids,
            pair_token_boxes,
            labels,
            overflowing_tokens,
            overflowing_token_boxes,
            overflowing_labels,
        )

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in self.padding_side:

                    - 'left': pads on the left of the sequences
                    - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                >= 7.5 (Volta).
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        required_input = encoded_inputs[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif self.padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
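

# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original module): a
# minimal example of calling the slow LayoutXLM tokenizer defined above with
# word-level text, 0-1000 normalized bounding boxes and optional word labels.
# It assumes the `sentencepiece` package is installed and that the
# "microsoft/layoutxlm-base" checkpoint is reachable; substitute a local path
# holding a compatible sentencepiece model otherwise. The toy words, boxes and
# labels below are made up for demonstration only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import LayoutXLMTokenizer

    tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base")

    words = ["hello", "world"]
    boxes = [[48, 84, 73, 128], [92, 84, 130, 128]]  # one box per word, normalized to a 0-1000 scale
    word_labels = [1, 2]  # optional token-classification labels

    encoding = tokenizer(
        words,
        boxes=boxes,
        word_labels=word_labels,
        padding="max_length",
        max_length=16,
        truncation=True,
    )
    print(len(encoding["input_ids"]))  # padded to max_length
    print(len(encoding["bbox"]))  # one bounding box per token, including special tokens
    print(encoding["labels"])  # first subword keeps the word label; other pieces, special tokens and padding get -100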
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for LayoutXLM model.""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import ( BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging from ..xlm_roberta.tokenization_xlm_roberta import ( PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_VOCAB_FILES_MAP, SPIECE_UNDERLINE, VOCAB_FILES_NAMES, ) logger = logging.get_logger(__name__) LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **bbox** -- List of bounding boxes to be fed to a model. - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified). - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). 
- **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`). """ class LayoutXLMTokenizer(PreTrainedTokenizer): """ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [CLS] token. sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. 
The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs ) -> None: # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab self.fairseq_offset = 1 self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. 
Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." 
) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) batch_outputs = self._batch_prepare_for_model( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_text_or_text_pairs, is_pair: bool = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """ batch_outputs = {} for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)): batch_text_or_text_pair, boxes_example = example outputs = self.prepare_for_model( batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: 
Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. " "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) return self.prepare_for_model( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING) def prepare_for_model( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs ) -> BatchEncoding: """ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into token-level `labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) tokens = [] pair_tokens = [] token_boxes = [] pair_token_boxes = [] labels = [] if text_pair is None: if word_labels is None: # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference) for word, box in zip(text, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) else: # CASE 2: token classification (training) for word, box, label in zip(text, boxes, word_labels): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) if self.only_label_first_subword: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) else: labels.extend([label] * len(word_tokens)) else: # CASE 3: document visual question answering (inference) # text = question # text_pair = words tokens = self.tokenize(text) token_boxes = [self.pad_token_box for _ in range(len(tokens))] + [self.sep_token_box] for word, box in zip(text_pair, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) pair_tokens.extend(word_tokens) pair_token_boxes.extend([box] * len(word_tokens)) # Create ids + pair_ids ids = self.convert_tokens_to_ids(tokens) pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None # Compute the total size of the returned encodings pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) = self.truncate_sequences( ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." 
) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes encoded_inputs["overflowing_labels"] = overflowing_labels encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box] if pair_token_boxes: pair_token_boxes = pair_token_boxes + [self.sep_token_box] if labels: labels = [self.pad_token_label] + labels + [self.pad_token_label] else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) # Build output dictionary encoded_inputs["input_ids"] = sequence encoded_inputs["bbox"] = token_boxes + pair_token_boxes if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) if labels: encoded_inputs["labels"] = labels # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def truncate_sequences( self, ids: List[int], token_boxes: List[List[int]], pair_ids: Optional[List[int]] = None, pair_token_boxes: Optional[List[List[int]]] = None, labels: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. token_boxes (`List[List[int]]`): Bounding boxes of the first sequence. pair_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_token_boxes (`List[List[int]]`, *optional*): Bounding boxes of the second sequence. labels (`List[int]`, *optional*): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (`int`, *optional*, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): The strategy to follow for truncation. 
Can be: - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (`int`, *optional*, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of overflowing tokens. """ if num_tokens_to_remove <= 0: return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy == TruncationStrategy.LONGEST_FIRST: for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): if not overflowing_tokens: window_len = min(len(ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(ids[-window_len:]) overflowing_token_boxes.extend(token_boxes[-window_len:]) overflowing_labels.extend(labels[-window_len:]) ids = ids[:-1] token_boxes = token_boxes[:-1] labels = labels[:-1] else: if not overflowing_tokens: window_len = min(len(pair_ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(pair_ids[-window_len:]) overflowing_token_boxes.extend(pair_token_boxes[-window_len:]) pair_ids = pair_ids[:-1] pair_token_boxes = pair_token_boxes[:-1] elif truncation_strategy == TruncationStrategy.ONLY_FIRST: if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] overflowing_token_boxes = token_boxes[-window_len:] overflowing_labels = labels[-window_len:] ids = ids[:-num_tokens_to_remove] token_boxes = token_boxes[:-num_tokens_to_remove] labels = labels[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the first sequence has a length {len(ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " "for instance 'longest_first' or 'only_second'." 
) elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] overflowing_token_boxes = pair_token_boxes[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the second sequence has a length {len(pair_ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " "for instance 'longest_first' or 'only_first'." ) return ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability >= 7.5 (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. 
if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
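For context, here is a minimal sketch of the kind of change this description calls for, assuming a DETR-style box post-processing helper. The function name `post_process_boxes`, the `boxes` argument, and the exact `scale_fct` construction are illustrative assumptions rather than the actual patch; only the idea of moving `scale_fct` onto the predictions' device reflects the described fix.

```python
# Illustrative sketch only; names below are assumed, not taken from the PR diff.
from typing import List, Union

import torch


def post_process_boxes(boxes: torch.Tensor, target_sizes: Union[torch.Tensor, List]) -> torch.Tensor:
    """Rescale normalized boxes of shape (batch, num_queries, 4) to absolute image sizes."""
    if isinstance(target_sizes, List):
        # torch.Tensor(...) always allocates on CPU, regardless of where `boxes` lives.
        img_h = torch.Tensor([i[0] for i in target_sizes])
        img_w = torch.Tensor([i[1] for i in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)
    # Without the .to(...) call, `scale_fct` stays on CPU when `target_sizes` is a list,
    # and the multiplication below fails for GPU `boxes`; moving it to `boxes.device` fixes that.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
    return boxes * scale_fct[:, None, :]


# Example (on a CUDA machine):
# boxes = torch.rand(2, 100, 4, device="cuda")
# post_process_boxes(boxes, [(480, 640), (320, 320)])
```

With `boxes` on `cuda`, the `.to(boxes.device)` call is what prevents the CPU/GPU mismatch when `target_sizes` is passed as a plain list.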
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for MobileNetV2."""

from ...utils import logging
from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor


logger = logging.get_logger(__name__)


MobileNetV2FeatureExtractor = MobileNetV2ImageProcessor
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for MobileNetV2."""

from ...utils import logging
from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor


logger = logging.get_logger(__name__)


MobileNetV2FeatureExtractor = MobileNetV2ImageProcessor
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/models/dpr/test_modeling_dpr.py
# coding=utf-8 # Copyright 2020 Huggingface # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import DPRConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import DPRContextEncoder, DPRQuestionEncoder, DPRReader, DPRReaderTokenizer from transformers.models.dpr.modeling_dpr import ( DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class DPRModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.projection_dim = projection_dim def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return DPRConfig( projection_dim=self.projection_dim, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, 
num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def create_and_check_context_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DPRContextEncoder(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size)) def create_and_check_question_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DPRQuestionEncoder(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size)) def create_and_check_reader( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DPRReader(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids} return config, inputs_dict @require_torch class DPRModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( DPRContextEncoder, DPRQuestionEncoder, DPRReader, ) if is_torch_available() else () ) test_resize_embeddings = False test_missing_keys = False # why? 
test_pruning = False test_head_masking = False def setUp(self): self.model_tester = DPRModelTester(self) self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_context_encoder_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_context_encoder(*config_and_inputs) def test_question_encoder_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_question_encoder(*config_and_inputs) def test_reader_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_reader(*config_and_inputs) def test_init_changed_config(self): config = self.model_tester.prepare_config_and_inputs()[0] model = DPRQuestionEncoder(config=config) model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = DPRQuestionEncoder.from_pretrained(tmp_dirname, projection_dim=512) self.assertIsNotNone(model) @slow def test_model_from_pretrained(self): for model_name in DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPRContextEncoder.from_pretrained(model_name) self.assertIsNotNone(model) for model_name in DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPRContextEncoder.from_pretrained(model_name) self.assertIsNotNone(model) for model_name in DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPRQuestionEncoder.from_pretrained(model_name) self.assertIsNotNone(model) for model_name in DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPRReader.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class DPRModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", return_dict=False) model.to(torch_device) input_ids = torch.tensor( [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]], dtype=torch.long, device=torch_device ) # [CLS] hello, is my dog cute? [SEP] output = model(input_ids)[0] # embedding shape = (1, 768) # compare the actual values for a slice. expected_slice = torch.tensor( [ [ 0.03236253, 0.12753335, 0.16818509, 0.00279786, 0.3896933, 0.24264945, 0.2178971, -0.02335227, -0.08481959, -0.14324117, ] ], dtype=torch.float, device=torch_device, ) self.assertTrue(torch.allclose(output[:, :10], expected_slice, atol=1e-4)) @slow def test_reader_inference(self): tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base") model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base") model.to(torch_device) encoded_inputs = tokenizer( questions="What is love ?", titles="Haddaway", texts="What Is Love is a song recorded by the artist Haddaway", padding=True, return_tensors="pt", ) encoded_inputs.to(torch_device) outputs = model(**encoded_inputs) # compare the actual values for a slice. 
expected_start_logits = torch.tensor( [[-10.3005, -10.7765, -11.4872, -11.6841, -11.9312, -10.3002, -9.8544, -11.7378, -12.0821, -10.2975]], dtype=torch.float, device=torch_device, ) expected_end_logits = torch.tensor( [[-11.0684, -11.7041, -11.5397, -10.3465, -10.8791, -6.8443, -11.9959, -11.0364, -10.0096, -6.8405]], dtype=torch.float, device=torch_device, ) self.assertTrue(torch.allclose(outputs.start_logits[:, :10], expected_start_logits, atol=1e-4)) self.assertTrue(torch.allclose(outputs.end_logits[:, :10], expected_end_logits, atol=1e-4))
# coding=utf-8 # Copyright 2020 Huggingface # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import DPRConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import DPRContextEncoder, DPRQuestionEncoder, DPRReader, DPRReaderTokenizer from transformers.models.dpr.modeling_dpr import ( DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class DPRModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.projection_dim = projection_dim def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return DPRConfig( projection_dim=self.projection_dim, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, 
num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def create_and_check_context_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DPRContextEncoder(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size)) def create_and_check_question_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DPRQuestionEncoder(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size)) def create_and_check_reader( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DPRReader(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids} return config, inputs_dict @require_torch class DPRModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( DPRContextEncoder, DPRQuestionEncoder, DPRReader, ) if is_torch_available() else () ) test_resize_embeddings = False test_missing_keys = False # why? 
test_pruning = False test_head_masking = False def setUp(self): self.model_tester = DPRModelTester(self) self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_context_encoder_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_context_encoder(*config_and_inputs) def test_question_encoder_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_question_encoder(*config_and_inputs) def test_reader_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_reader(*config_and_inputs) def test_init_changed_config(self): config = self.model_tester.prepare_config_and_inputs()[0] model = DPRQuestionEncoder(config=config) model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = DPRQuestionEncoder.from_pretrained(tmp_dirname, projection_dim=512) self.assertIsNotNone(model) @slow def test_model_from_pretrained(self): for model_name in DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPRContextEncoder.from_pretrained(model_name) self.assertIsNotNone(model) for model_name in DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPRContextEncoder.from_pretrained(model_name) self.assertIsNotNone(model) for model_name in DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPRQuestionEncoder.from_pretrained(model_name) self.assertIsNotNone(model) for model_name in DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPRReader.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class DPRModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", return_dict=False) model.to(torch_device) input_ids = torch.tensor( [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]], dtype=torch.long, device=torch_device ) # [CLS] hello, is my dog cute? [SEP] output = model(input_ids)[0] # embedding shape = (1, 768) # compare the actual values for a slice. expected_slice = torch.tensor( [ [ 0.03236253, 0.12753335, 0.16818509, 0.00279786, 0.3896933, 0.24264945, 0.2178971, -0.02335227, -0.08481959, -0.14324117, ] ], dtype=torch.float, device=torch_device, ) self.assertTrue(torch.allclose(output[:, :10], expected_slice, atol=1e-4)) @slow def test_reader_inference(self): tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base") model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base") model.to(torch_device) encoded_inputs = tokenizer( questions="What is love ?", titles="Haddaway", texts="What Is Love is a song recorded by the artist Haddaway", padding=True, return_tensors="pt", ) encoded_inputs.to(torch_device) outputs = model(**encoded_inputs) # compare the actual values for a slice. 
expected_start_logits = torch.tensor( [[-10.3005, -10.7765, -11.4872, -11.6841, -11.9312, -10.3002, -9.8544, -11.7378, -12.0821, -10.2975]], dtype=torch.float, device=torch_device, ) expected_end_logits = torch.tensor( [[-11.0684, -11.7041, -11.5397, -10.3465, -10.8791, -6.8443, -11.9959, -11.0364, -10.0096, -6.8405]], dtype=torch.float, device=torch_device, ) self.assertTrue(torch.allclose(outputs.start_logits[:, :10], expected_start_logits, atol=1e-4)) self.assertTrue(torch.allclose(outputs.end_logits[:, :10], expected_end_logits, atol=1e-4))
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/models/blenderbot_small/__init__.py
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/models/xlm_roberta_xl/__init__.py
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/models/t5/test_tokenization_t5.py
# coding=utf-8 # Copyright 2018 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest from transformers import SPIECE_UNDERLINE, AddedToken, BatchEncoding, T5Tokenizer, T5TokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" @require_sentencepiece @require_tokenizers class T5TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = T5Tokenizer rust_tokenizer_class = T5TokenizerFast test_rust_tokenizer = True test_sentencepiece = True def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = T5Tokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "<s>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") self.assertEqual(vocab_keys[-1], "<pad>") self.assertEqual(len(vocab_keys), 1_101) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_100) def test_full_tokenizer(self): tokenizer = T5Tokenizer(SAMPLE_VOCAB) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def t5_base_tokenizer(self): return T5Tokenizer.from_pretrained("t5-base") @cached_property def t5_base_tokenizer_fast(self): return 
T5TokenizerFast.from_pretrained("t5-base") def get_tokenizer(self, **kwargs) -> T5Tokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, pad_token=None, **kwargs) def get_rust_tokenizer(self, **kwargs) -> T5TokenizerFast: return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, pad_token=None, **kwargs) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_eos_treatment(self): tokenizer = self.t5_base_tokenizer batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"]) batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""]) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"]) def test_prepare_batch(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, tokenizer.eos_token_id] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) def test_empty_target_text(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length(self): tokenizer = self.t5_base_tokenizer tgt_text = [ "Summary of the text.", "Another summary.", ] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) def test_outputs_not_longer_than_maxlen(self): tokenizer = self.t5_base_tokenizer batch = tokenizer( ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK ) self.assertIsInstance(batch, BatchEncoding) # Since T5 does NOT have a max input length, # this test should be changed to the following in Transformers v5: # self.assertEqual(batch.input_ids.shape, (2, 8001)) self.assertEqual(batch.input_ids.shape, (2, 512)) def test_eos_in_input(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization. </s>"] tgt_text = ["Summary of the text. 
</s>"] expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, 1] expected_tgt_tokens = [20698, 13, 8, 1499, 5, 1] batch = tokenizer(src_text, text_target=tgt_text) self.assertEqual(expected_src_tokens, batch["input_ids"][0]) self.assertEqual(expected_tgt_tokens, batch["labels"][0]) def test_token_type_ids(self): src_text_1 = ["A first paragraph for summarization."] src_text_2 = ["A second paragraph for summarization."] fast_token_type_ids = self.t5_base_tokenizer_fast( src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True ).token_type_ids slow_token_type_ids = self.t5_base_tokenizer( src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True ).token_type_ids self.assertEqual(slow_token_type_ids, fast_token_type_ids) self.assertEqual(len(slow_token_type_ids[0]), 18) def test_fast_and_slow_same_result(self): src_text = "<pad> Today is <unk> nice day </s>" tgt_ids = [0, 1960, 19, 2, 1245, 239, 1] tgt_text = "<pad> Today is<unk> nice day</s>" fast_ids = self.t5_base_tokenizer_fast(src_text, add_special_tokens=False).input_ids slow_ids = self.t5_base_tokenizer(src_text, add_special_tokens=False).input_ids self.assertEqual(tgt_ids, fast_ids) self.assertEqual(tgt_ids, slow_ids) fast_text = self.t5_base_tokenizer_fast.decode(fast_ids) slow_text = self.t5_base_tokenizer.decode(fast_ids) self.assertEqual(tgt_text, fast_text) self.assertEqual(tgt_text, slow_text) def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [f"<extra_id_{i}>" for i in range(100)] + [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") r_output = tokenizer_r.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in r_output) self.assertTrue(special_token_id in cr_output) def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(100)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ 
"an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) # overwritten from `test_tokenization_common` since T5 has no max length def test_pretrained_model_lists(self): # We should have at least one default checkpoint for each tokenizer # We should specify the max input length as well (used in some part to list the pretrained checkpoints) self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1) @slow def test_tokenizer_integration(self): # fmt: off expected_encoding = {'input_ids': [[31220, 7, 41, 14034, 801, 38, 3, 102, 63, 17, 127, 524, 18, 7031, 2032, 277, 11, 3, 102, 63, 17, 127, 524, 18, 2026, 17, 10761, 18, 7041, 61, 795, 879, 18, 19681, 4648, 7, 41, 12920, 382, 6, 350, 6383, 4949, 6, 2158, 12920, 382, 9, 6, 3, 4, 11160, 6, 2043, 17153, 279, 49, 17, 6, 3, 4, 434, 9688, 11439, 21, 6869, 10509, 17725, 41, 567, 9138, 61, 11, 6869, 10509, 11946, 41, 18207, 517, 61, 28, 147, 3538, 1220, 7140, 10761, 2250, 16, 910, 1220, 8024, 11, 1659, 1413, 32, 883, 2020, 344, 2215, 226, 6, 12901, 382, 127, 524, 11, 4738, 7, 127, 15390, 5, 1], [272, 24203, 19, 876, 12, 554, 18, 9719, 1659, 2647, 26352, 6497, 7, 45, 73, 9339, 400, 26, 1499, 57, 22801, 10760, 30, 321, 646, 11, 269, 2625, 16, 66, 7500, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [37, 1704, 4216, 3, 20400, 4418, 7, 147, 8, 19743, 1782, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="t5-base", revision="5a7ff2d8f5117c194c7e32ec1ccbf04642cca99b", )
# coding=utf-8 # Copyright 2018 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest from transformers import SPIECE_UNDERLINE, AddedToken, BatchEncoding, T5Tokenizer, T5TokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" @require_sentencepiece @require_tokenizers class T5TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = T5Tokenizer rust_tokenizer_class = T5TokenizerFast test_rust_tokenizer = True test_sentencepiece = True def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = T5Tokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "<s>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") self.assertEqual(vocab_keys[-1], "<pad>") self.assertEqual(len(vocab_keys), 1_101) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_100) def test_full_tokenizer(self): tokenizer = T5Tokenizer(SAMPLE_VOCAB) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def t5_base_tokenizer(self): return T5Tokenizer.from_pretrained("t5-base") @cached_property def t5_base_tokenizer_fast(self): return 
T5TokenizerFast.from_pretrained("t5-base") def get_tokenizer(self, **kwargs) -> T5Tokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, pad_token=None, **kwargs) def get_rust_tokenizer(self, **kwargs) -> T5TokenizerFast: return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, pad_token=None, **kwargs) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_eos_treatment(self): tokenizer = self.t5_base_tokenizer batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"]) batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""]) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"]) def test_prepare_batch(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, tokenizer.eos_token_id] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) def test_empty_target_text(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length(self): tokenizer = self.t5_base_tokenizer tgt_text = [ "Summary of the text.", "Another summary.", ] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) def test_outputs_not_longer_than_maxlen(self): tokenizer = self.t5_base_tokenizer batch = tokenizer( ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK ) self.assertIsInstance(batch, BatchEncoding) # Since T5 does NOT have a max input length, # this test should be changed to the following in Transformers v5: # self.assertEqual(batch.input_ids.shape, (2, 8001)) self.assertEqual(batch.input_ids.shape, (2, 512)) def test_eos_in_input(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization. </s>"] tgt_text = ["Summary of the text. 
</s>"] expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, 1] expected_tgt_tokens = [20698, 13, 8, 1499, 5, 1] batch = tokenizer(src_text, text_target=tgt_text) self.assertEqual(expected_src_tokens, batch["input_ids"][0]) self.assertEqual(expected_tgt_tokens, batch["labels"][0]) def test_token_type_ids(self): src_text_1 = ["A first paragraph for summarization."] src_text_2 = ["A second paragraph for summarization."] fast_token_type_ids = self.t5_base_tokenizer_fast( src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True ).token_type_ids slow_token_type_ids = self.t5_base_tokenizer( src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True ).token_type_ids self.assertEqual(slow_token_type_ids, fast_token_type_ids) self.assertEqual(len(slow_token_type_ids[0]), 18) def test_fast_and_slow_same_result(self): src_text = "<pad> Today is <unk> nice day </s>" tgt_ids = [0, 1960, 19, 2, 1245, 239, 1] tgt_text = "<pad> Today is<unk> nice day</s>" fast_ids = self.t5_base_tokenizer_fast(src_text, add_special_tokens=False).input_ids slow_ids = self.t5_base_tokenizer(src_text, add_special_tokens=False).input_ids self.assertEqual(tgt_ids, fast_ids) self.assertEqual(tgt_ids, slow_ids) fast_text = self.t5_base_tokenizer_fast.decode(fast_ids) slow_text = self.t5_base_tokenizer.decode(fast_ids) self.assertEqual(tgt_text, fast_text) self.assertEqual(tgt_text, slow_text) def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [f"<extra_id_{i}>" for i in range(100)] + [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") r_output = tokenizer_r.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in r_output) self.assertTrue(special_token_id in cr_output) def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(100)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ 
"an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) # overwritten from `test_tokenization_common` since T5 has no max length def test_pretrained_model_lists(self): # We should have at least one default checkpoint for each tokenizer # We should specify the max input length as well (used in some part to list the pretrained checkpoints) self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1) @slow def test_tokenizer_integration(self): # fmt: off expected_encoding = {'input_ids': [[31220, 7, 41, 14034, 801, 38, 3, 102, 63, 17, 127, 524, 18, 7031, 2032, 277, 11, 3, 102, 63, 17, 127, 524, 18, 2026, 17, 10761, 18, 7041, 61, 795, 879, 18, 19681, 4648, 7, 41, 12920, 382, 6, 350, 6383, 4949, 6, 2158, 12920, 382, 9, 6, 3, 4, 11160, 6, 2043, 17153, 279, 49, 17, 6, 3, 4, 434, 9688, 11439, 21, 6869, 10509, 17725, 41, 567, 9138, 61, 11, 6869, 10509, 11946, 41, 18207, 517, 61, 28, 147, 3538, 1220, 7140, 10761, 2250, 16, 910, 1220, 8024, 11, 1659, 1413, 32, 883, 2020, 344, 2215, 226, 6, 12901, 382, 127, 524, 11, 4738, 7, 127, 15390, 5, 1], [272, 24203, 19, 876, 12, 554, 18, 9719, 1659, 2647, 26352, 6497, 7, 45, 73, 9339, 400, 26, 1499, 57, 22801, 10760, 30, 321, 646, 11, 269, 2625, 16, 66, 7500, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [37, 1704, 4216, 3, 20400, 4418, 7, 147, 8, 19743, 1782, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="t5-base", revision="5a7ff2d8f5117c194c7e32ec1ccbf04642cca99b", )
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
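A minimal, hedged sketch of the device fix described above — not the PR's actual diff. The helper name `build_scale_fct` and the `out_bbox` argument are hypothetical; only the `target_sizes` handling mirrors the snippet quoted in the description:

```python
import torch


def build_scale_fct(target_sizes, out_bbox):
    # When target_sizes is a plain Python list of (height, width) pairs,
    # torch.Tensor(...) allocates the resulting tensors on the CPU.
    if isinstance(target_sizes, list):
        img_h = torch.Tensor([i[0] for i in target_sizes])
        img_w = torch.Tensor([i[1] for i in target_sizes])
    else:
        # Already a tensor: split into height and width columns.
        img_h, img_w = target_sizes.unbind(1)

    # Move the scale factor onto the same device as the model output so a
    # later `boxes * scale_fct` does not mix CPU and CUDA tensors.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(out_bbox.device)
    return scale_fct
```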
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/models/poolformer/test_modeling_poolformer.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch PoolFormer model. """ import inspect import unittest from transformers import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch from transformers import MODEL_MAPPING, PoolFormerConfig, PoolFormerForImageClassification, PoolFormerModel from transformers.models.poolformer.modeling_poolformer import POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import PoolFormerFeatureExtractor class PoolFormerConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class PoolFormerModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], is_training=False, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = PoolFormerConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, initializer_range=self.initializer_range, ) return config, pixel_values, labels def create_and_check_model(self, config, pixel_values, labels): model = PoolFormerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = self.image_size // 32.0 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = 
self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class PoolFormerModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (PoolFormerModel, PoolFormerForImageClassification) if is_torch_available() else () test_head_masking = False test_pruning = False test_resize_embeddings = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = PoolFormerModelTester(self) self.config_tester = PoolFormerConfigTester(self, config_class=PoolFormerConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="PoolFormer does not output attentions") def test_attention_outputs(self): pass @unittest.skip("PoolFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("PoolFormer does not have get_input_embeddings method and get_output_embeddings methods") def test_model_common_attributes(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) @slow def test_model_from_pretrained(self): for model_name in POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = PoolFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class PoolFormerModelIntegrationTest(unittest.TestCase): @slow def 
test_inference_image_classification_head(self): feature_extractor = PoolFormerFeatureExtractor() model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12").to(torch_device) inputs = feature_extractor(images=prepare_img(), return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.6113, 0.1685, -0.0492]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch PoolFormer model. """ import inspect import unittest from transformers import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch from transformers import MODEL_MAPPING, PoolFormerConfig, PoolFormerForImageClassification, PoolFormerModel from transformers.models.poolformer.modeling_poolformer import POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import PoolFormerFeatureExtractor class PoolFormerConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class PoolFormerModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], is_training=False, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = PoolFormerConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, initializer_range=self.initializer_range, ) return config, pixel_values, labels def create_and_check_model(self, config, pixel_values, labels): model = PoolFormerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = self.image_size // 32.0 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = 
self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class PoolFormerModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (PoolFormerModel, PoolFormerForImageClassification) if is_torch_available() else () test_head_masking = False test_pruning = False test_resize_embeddings = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = PoolFormerModelTester(self) self.config_tester = PoolFormerConfigTester(self, config_class=PoolFormerConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="PoolFormer does not output attentions") def test_attention_outputs(self): pass @unittest.skip("PoolFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("PoolFormer does not have get_input_embeddings method and get_output_embeddings methods") def test_model_common_attributes(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) @slow def test_model_from_pretrained(self): for model_name in POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = PoolFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class PoolFormerModelIntegrationTest(unittest.TestCase): @slow def 
test_inference_image_classification_head(self): feature_extractor = PoolFormerFeatureExtractor() model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12").to(torch_device) inputs = feature_extractor(images=prepare_img(), return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.6113, 0.1685, -0.0492]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert YOSO checkpoints from the original repository. URL: https://github.com/mlpen/YOSO""" import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def rename_key(orig_key): if "model" in orig_key: orig_key = orig_key.replace("model.", "") if "norm1" in orig_key: orig_key = orig_key.replace("norm1", "attention.output.LayerNorm") if "norm2" in orig_key: orig_key = orig_key.replace("norm2", "output.LayerNorm") if "norm" in orig_key: orig_key = orig_key.replace("norm", "LayerNorm") if "transformer" in orig_key: layer_num = orig_key.split(".")[0].split("_")[-1] orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}") if "mha.attn" in orig_key: orig_key = orig_key.replace("mha.attn", "attention.self") if "mha" in orig_key: orig_key = orig_key.replace("mha", "attention") if "W_q" in orig_key: orig_key = orig_key.replace("W_q", "self.query") if "W_k" in orig_key: orig_key = orig_key.replace("W_k", "self.key") if "W_v" in orig_key: orig_key = orig_key.replace("W_v", "self.value") if "ff1" in orig_key: orig_key = orig_key.replace("ff1", "intermediate.dense") if "ff2" in orig_key: orig_key = orig_key.replace("ff2", "output.dense") if "ff" in orig_key: orig_key = orig_key.replace("ff", "output.dense") if "mlm_class" in orig_key: orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder") if "mlm" in orig_key: orig_key = orig_key.replace("mlm", "cls.predictions.transform") if "cls" not in orig_key: orig_key = "yoso." + orig_key return orig_key def convert_checkpoint_helper(max_position_embeddings, orig_state_dict): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if ("pooler" in key) or ("sen_class" in key): continue else: orig_state_dict[rename_key(key)] = val orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"] orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2 return orig_state_dict def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path): orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"] config = YosoConfig.from_json_file(yoso_config_file) model = YosoForMaskedLM(config) new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict) print(model.load_state_dict(new_state_dict)) model.eval() model.save_pretrained(pytorch_dump_path) print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for YOSO model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert YOSO checkpoints from the original repository. URL: https://github.com/mlpen/YOSO""" import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def rename_key(orig_key): if "model" in orig_key: orig_key = orig_key.replace("model.", "") if "norm1" in orig_key: orig_key = orig_key.replace("norm1", "attention.output.LayerNorm") if "norm2" in orig_key: orig_key = orig_key.replace("norm2", "output.LayerNorm") if "norm" in orig_key: orig_key = orig_key.replace("norm", "LayerNorm") if "transformer" in orig_key: layer_num = orig_key.split(".")[0].split("_")[-1] orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}") if "mha.attn" in orig_key: orig_key = orig_key.replace("mha.attn", "attention.self") if "mha" in orig_key: orig_key = orig_key.replace("mha", "attention") if "W_q" in orig_key: orig_key = orig_key.replace("W_q", "self.query") if "W_k" in orig_key: orig_key = orig_key.replace("W_k", "self.key") if "W_v" in orig_key: orig_key = orig_key.replace("W_v", "self.value") if "ff1" in orig_key: orig_key = orig_key.replace("ff1", "intermediate.dense") if "ff2" in orig_key: orig_key = orig_key.replace("ff2", "output.dense") if "ff" in orig_key: orig_key = orig_key.replace("ff", "output.dense") if "mlm_class" in orig_key: orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder") if "mlm" in orig_key: orig_key = orig_key.replace("mlm", "cls.predictions.transform") if "cls" not in orig_key: orig_key = "yoso." + orig_key return orig_key def convert_checkpoint_helper(max_position_embeddings, orig_state_dict): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if ("pooler" in key) or ("sen_class" in key): continue else: orig_state_dict[rename_key(key)] = val orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"] orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2 return orig_state_dict def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path): orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"] config = YosoConfig.from_json_file(yoso_config_file) model = YosoForMaskedLM(config) new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict) print(model.load_state_dict(new_state_dict)) model.eval() model.save_pretrained(pytorch_dump_path) print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for YOSO model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/realm/tokenization_realm_fast.py
# coding=utf-8 # Copyright 2022 The REALM authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Tokenization classes for REALM.""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt" ), "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt", "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt", "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt", "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt", }, "tokenizer_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json" ), "google/realm-orqa-nq-openqa": ( "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-nq-reader": ( "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-openqa": ( "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-reader": ( "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json" ), }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/realm-cc-news-pretrained-embedder": 512, "google/realm-cc-news-pretrained-encoder": 512, "google/realm-cc-news-pretrained-scorer": 512, "google/realm-cc-news-pretrained-openqa": 512, "google/realm-orqa-nq-openqa": 512, "google/realm-orqa-nq-reader": 512, "google/realm-orqa-wq-openqa": 512, "google/realm-orqa-wq-reader": 512, } 
PRETRAINED_INIT_CONFIGURATION = { "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True}, "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-reader": {"do_lower_case": True}, "google/realm-orqa-wq-openqa": {"do_lower_case": True}, "google/realm-orqa-wq-reader": {"do_lower_case": True}, } class RealmTokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. [`RealmTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (`bool`, *optional*, defaults to `True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = RealmTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase", do_lower_case) != do_lower_case or normalizer_state.get("strip_accents", strip_accents) != strip_accents or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars ): normalizer_class = getattr(normalizers, normalizer_state.pop("type")) normalizer_state["lowercase"] = do_lower_case normalizer_state["strip_accents"] = strip_accents normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def batch_encode_candidates(self, text, **kwargs): r""" Encode a batch of text or text pair. This method is similar to regular __call__ method but has the following differences: 1. Handle additional num_candidate axis. (batch_size, num_candidates, text) 2. Always pad the sequences to *max_length*. 3. Must specify *max_length* in order to stack packs of candidates into a batch. - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: text (`List[List[str]]`): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). text_pair (`List[List[str]]`, *optional*): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). **kwargs: Keyword arguments of the __call__ method. Returns: [`BatchEncoding`]: Encoded text or text pair. Example: ```python >>> from transformers import RealmTokenizerFast >>> # batch_size = 2, num_candidates = 2 >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] >>> tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder") >>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") ```""" # Always using a fixed sequence length to encode in order to stack candidates into a batch. 
kwargs["padding"] = PaddingStrategy.MAX_LENGTH batch_text = text batch_text_pair = kwargs.pop("text_pair", None) return_tensors = kwargs.pop("return_tensors", None) output_data = { "input_ids": [], "attention_mask": [], "token_type_ids": [], } for idx, candidate_text in enumerate(batch_text): if batch_text_pair is not None: candidate_text_pair = batch_text_pair[idx] else: candidate_text_pair = None encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs) encoded_input_ids = encoded_candidates.get("input_ids") encoded_attention_mask = encoded_candidates.get("attention_mask") encoded_token_type_ids = encoded_candidates.get("token_type_ids") if encoded_input_ids is not None: output_data["input_ids"].append(encoded_input_ids) if encoded_attention_mask is not None: output_data["attention_mask"].append(encoded_attention_mask) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(encoded_token_type_ids) output_data = dict((key, item) for key, item in output_data.items() if len(item) != 0) return BatchEncoding(output_data, tensor_type=return_tensors) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REALM sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A REALM sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
# coding=utf-8 # Copyright 2022 The REALM authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Tokenization classes for REALM.""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt" ), "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt", "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt", "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt", "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt", }, "tokenizer_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json" ), "google/realm-orqa-nq-openqa": ( "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-nq-reader": ( "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-openqa": ( "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-reader": ( "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json" ), }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/realm-cc-news-pretrained-embedder": 512, "google/realm-cc-news-pretrained-encoder": 512, "google/realm-cc-news-pretrained-scorer": 512, "google/realm-cc-news-pretrained-openqa": 512, "google/realm-orqa-nq-openqa": 512, "google/realm-orqa-nq-reader": 512, "google/realm-orqa-wq-openqa": 512, "google/realm-orqa-wq-reader": 512, } 
PRETRAINED_INIT_CONFIGURATION = { "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True}, "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-reader": {"do_lower_case": True}, "google/realm-orqa-wq-openqa": {"do_lower_case": True}, "google/realm-orqa-wq-reader": {"do_lower_case": True}, } class RealmTokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. [`RealmTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (`bool`, *optional*, defaults to `True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = RealmTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase", do_lower_case) != do_lower_case or normalizer_state.get("strip_accents", strip_accents) != strip_accents or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars ): normalizer_class = getattr(normalizers, normalizer_state.pop("type")) normalizer_state["lowercase"] = do_lower_case normalizer_state["strip_accents"] = strip_accents normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def batch_encode_candidates(self, text, **kwargs): r""" Encode a batch of text or text pair. This method is similar to regular __call__ method but has the following differences: 1. Handle additional num_candidate axis. (batch_size, num_candidates, text) 2. Always pad the sequences to *max_length*. 3. Must specify *max_length* in order to stack packs of candidates into a batch. - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: text (`List[List[str]]`): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). text_pair (`List[List[str]]`, *optional*): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). **kwargs: Keyword arguments of the __call__ method. Returns: [`BatchEncoding`]: Encoded text or text pair. Example: ```python >>> from transformers import RealmTokenizerFast >>> # batch_size = 2, num_candidates = 2 >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] >>> tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder") >>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") ```""" # Always using a fixed sequence length to encode in order to stack candidates into a batch. 
kwargs["padding"] = PaddingStrategy.MAX_LENGTH batch_text = text batch_text_pair = kwargs.pop("text_pair", None) return_tensors = kwargs.pop("return_tensors", None) output_data = { "input_ids": [], "attention_mask": [], "token_type_ids": [], } for idx, candidate_text in enumerate(batch_text): if batch_text_pair is not None: candidate_text_pair = batch_text_pair[idx] else: candidate_text_pair = None encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs) encoded_input_ids = encoded_candidates.get("input_ids") encoded_attention_mask = encoded_candidates.get("attention_mask") encoded_token_type_ids = encoded_candidates.get("token_type_ids") if encoded_input_ids is not None: output_data["input_ids"].append(encoded_input_ids) if encoded_attention_mask is not None: output_data["attention_mask"].append(encoded_attention_mask) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(encoded_token_type_ids) output_data = dict((key, item) for key, item in output_data.items() if len(item) != 0) return BatchEncoding(output_data, tensor_type=return_tensors) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REALM sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A REALM sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
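To make the described failure mode concrete, here is a minimal sketch of the kind of fix the description refers to. The helper name `rescale_boxes` and its signature are illustrative assumptions, not the library's actual post-processing code: the point is only that tensors built from a plain Python list of sizes start on CPU, so the resulting scale factor has to be moved to the device of the predicted boxes before broadcasting.

```python
import torch


def rescale_boxes(boxes: torch.Tensor, target_sizes) -> torch.Tensor:
    # Hypothetical helper illustrating the device issue described above; it is
    # not the actual transformers post-processing function.
    if isinstance(target_sizes, list):
        # torch.Tensor(...) built from a Python list always allocates on CPU.
        img_h = torch.Tensor([size[0] for size in target_sizes])
        img_w = torch.Tensor([size[1] for size in target_sizes])
    else:
        # target_sizes given as a (batch_size, 2) tensor of (height, width).
        img_h, img_w = target_sizes.unbind(1)
    # Without .to(boxes.device), scale_fct stays on CPU and multiplying it with
    # boxes that live on another device raises a device-mismatch error.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
    return boxes * scale_fct[:, None, :]


# Usage: boxes of shape (batch_size, num_queries, 4) in relative coordinates.
boxes = torch.rand(2, 5, 4)
scaled = rescale_boxes(boxes, [(480, 640), (320, 320)])
print(scaled.shape)  # torch.Size([2, 5, 4])
```

The `.to(boxes.device)` call is the essential piece: it keeps the CPU fallback for list inputs while making the subsequent broadcasted multiplication device-safe.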
./src/transformers/models/nat/modeling_nat.py
# coding=utf-8 # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Neighborhood Attention Transformer model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, requires_backends, ) from .configuration_nat import NatConfig if is_natten_available(): from natten.functional import natten2dav, natten2dqkrpb else: def natten2dqkrpb(*args, **kwargs): raise OptionalDependencyNotAvailable() def natten2dav(*args, **kwargs): raise OptionalDependencyNotAvailable() logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "NatConfig" _FEAT_EXTRACTOR_FOR_DOC = "AutoImageProcessor" # Base docstring _CHECKPOINT_FOR_DOC = "shi-labs/nat-mini-in1k-224" _EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "shi-labs/nat-mini-in1k-224" _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" NAT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "shi-labs/nat-mini-in1k-224", # See all Nat models at https://huggingface.co/models?filter=nat ] # drop_path and NatDropPath are from the timm library. @dataclass class NatEncoderOutput(ModelOutput): """ Nat encoder's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class NatModelOutput(ModelOutput): """ Nat model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class NatImageClassifierOutput(ModelOutput): """ Nat outputs for image classification. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None class NatEmbeddings(nn.Module): """ Construct the patch and position embeddings. """ def __init__(self, config): super().__init__() self.patch_embeddings = NatPatchEmbeddings(config) self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]: embeddings = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) embeddings = self.dropout(embeddings) return embeddings class NatPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() patch_size = config.patch_size num_channels, hidden_size = config.num_channels, config.embed_dim self.num_channels = num_channels if patch_size == 4: pass else: # TODO: Support arbitrary patch sizes. raise ValueError("Dinat only supports patch size of 4 at the moment.") self.projection = nn.Sequential( nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), ) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor: _, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embeddings = self.projection(pixel_values) embeddings = embeddings.permute(0, 2, 3, 1) return embeddings class NatDownsampler(nn.Module): """ Convolutional Downsampling Layer. Args: dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. """ def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.dim = dim self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) self.norm = norm_layer(2 * dim) def forward(self, input_feature: torch.Tensor) -> torch.Tensor: input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) input_feature = self.norm(input_feature) return input_feature # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True): """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... 
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Nat class NatDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, x: torch.Tensor) -> torch.Tensor: return drop_path(x, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) class NeighborhoodAttention(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.kernel_size = kernel_size # rpb is learnable relative positional biases; same concept is used Swin. self.rpb = nn.Parameter(torch.zeros(num_heads, (2 * self.kernel_size - 1), (2 * self.kernel_size - 1))) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 3, 1, 2, 4) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) # Apply the scale factor before computing attention weights. It's usually more efficient because # attention weights are typically a bigger tensor compared to query. # It gives identical results because scalars are commutable in matrix multiplication. query_layer = query_layer / math.sqrt(self.attention_head_size) # Compute NA between "query" and "key" to get the raw attention scores, and add relative positional biases. attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, 1) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) context_layer = natten2dav(attention_probs, value_layer, 1) context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class NeighborhoodAttentionOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class NeighborhoodAttentionModule(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): super().__init__() self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size) self.output = NeighborhoodAttentionOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class NatIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class NatOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class NatLayer(nn.Module): def __init__(self, config, dim, num_heads, drop_path_rate=0.0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.kernel_size = config.kernel_size self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size) self.drop_path = NatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = NatIntermediate(config, 
dim) self.output = NatOutput(config, dim) def maybe_pad(self, hidden_states, height, width): window_size = self.kernel_size pad_values = (0, 0, 0, 0, 0, 0) if height < window_size or width < window_size: pad_l = pad_t = 0 pad_r = max(0, window_size - width) pad_b = max(0, window_size - height) pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, torch.Tensor]: batch_size, height, width, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) # pad hidden_states if they are smaller than kernel size hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape attention_outputs = self.attention(hidden_states, output_attentions=output_attentions) attention_output = attention_outputs[0] was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_output = attention_output[:, :height, :width, :].contiguous() hidden_states = shortcut + self.drop_path(attention_output) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.output(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs class NatStage(nn.Module): def __init__(self, config, dim, depth, num_heads, drop_path_rate, downsample): super().__init__() self.config = config self.dim = dim self.layers = nn.ModuleList( [ NatLayer( config=config, dim=dim, num_heads=num_heads, drop_path_rate=drop_path_rate[i], ) for i in range(depth) ] ) # patch merging layer if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: _, height, width, _ = hidden_states.size() for i, layer_module in enumerate(self.layers): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] if self.downsample is not None: height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(layer_outputs[0]) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs class NatEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_levels = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] self.levels = nn.ModuleList( [ NatStage( config=config, dim=int(config.embed_dim * 2**i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path_rate=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=NatDownsampler if (i_layer < self.num_levels - 1) else None, ) for i_layer in range(self.num_levels) ] ) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, NatEncoderOutput]: all_hidden_states = () if output_hidden_states else None 
all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: # rearrange b h w c -> b c h w reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.levels): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] if output_hidden_states: # rearrange b h w c -> b c h w reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[2:] if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return NatEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states, ) class NatPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = NatConfig base_model_prefix = "nat" main_input_name = "pixel_values" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module: NatEncoder, value: bool = False) -> None: pass NAT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`NatConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ NAT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`AutoImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare Nat Model transformer outputting raw hidden-states without any specific head on top.", NAT_START_DOCSTRING, ) class NatModel(NatPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) requires_backends(self, ["natten"]) self.config = config self.num_levels = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1)) self.embeddings = NatEmbeddings(config) self.encoder = NatEncoder(config) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=NatModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, NatModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return NatModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, ) @add_start_docstrings( """ Nat Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. 
""", NAT_START_DOCSTRING, ) class NatForImageClassification(NatPreTrainedModel): def __init__(self, config): super().__init__(config) requires_backends(self, ["natten"]) self.num_labels = config.num_labels self.nat = NatModel(config) # Classifier head self.classifier = ( nn.Linear(self.nat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=NatImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, NatImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nat( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return NatImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states, )
# coding=utf-8 # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Neighborhood Attention Transformer model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, requires_backends, ) from .configuration_nat import NatConfig if is_natten_available(): from natten.functional import natten2dav, natten2dqkrpb else: def natten2dqkrpb(*args, **kwargs): raise OptionalDependencyNotAvailable() def natten2dav(*args, **kwargs): raise OptionalDependencyNotAvailable() logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "NatConfig" _FEAT_EXTRACTOR_FOR_DOC = "AutoImageProcessor" # Base docstring _CHECKPOINT_FOR_DOC = "shi-labs/nat-mini-in1k-224" _EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "shi-labs/nat-mini-in1k-224" _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" NAT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "shi-labs/nat-mini-in1k-224", # See all Nat models at https://huggingface.co/models?filter=nat ] # drop_path and NatDropPath are from the timm library. @dataclass class NatEncoderOutput(ModelOutput): """ Nat encoder's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class NatModelOutput(ModelOutput): """ Nat model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class NatImageClassifierOutput(ModelOutput): """ Nat outputs for image classification. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None class NatEmbeddings(nn.Module): """ Construct the patch and position embeddings. """ def __init__(self, config): super().__init__() self.patch_embeddings = NatPatchEmbeddings(config) self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]: embeddings = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) embeddings = self.dropout(embeddings) return embeddings class NatPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() patch_size = config.patch_size num_channels, hidden_size = config.num_channels, config.embed_dim self.num_channels = num_channels if patch_size == 4: pass else: # TODO: Support arbitrary patch sizes. raise ValueError("Dinat only supports patch size of 4 at the moment.") self.projection = nn.Sequential( nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), ) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor: _, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embeddings = self.projection(pixel_values) embeddings = embeddings.permute(0, 2, 3, 1) return embeddings class NatDownsampler(nn.Module): """ Convolutional Downsampling Layer. Args: dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. """ def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.dim = dim self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) self.norm = norm_layer(2 * dim) def forward(self, input_feature: torch.Tensor) -> torch.Tensor: input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) input_feature = self.norm(input_feature) return input_feature # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True): """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... 
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Nat class NatDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, x: torch.Tensor) -> torch.Tensor: return drop_path(x, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) class NeighborhoodAttention(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.kernel_size = kernel_size # rpb is learnable relative positional biases; same concept is used Swin. self.rpb = nn.Parameter(torch.zeros(num_heads, (2 * self.kernel_size - 1), (2 * self.kernel_size - 1))) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 3, 1, 2, 4) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) # Apply the scale factor before computing attention weights. It's usually more efficient because # attention weights are typically a bigger tensor compared to query. # It gives identical results because scalars are commutable in matrix multiplication. query_layer = query_layer / math.sqrt(self.attention_head_size) # Compute NA between "query" and "key" to get the raw attention scores, and add relative positional biases. attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, 1) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) context_layer = natten2dav(attention_probs, value_layer, 1) context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class NeighborhoodAttentionOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class NeighborhoodAttentionModule(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): super().__init__() self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size) self.output = NeighborhoodAttentionOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class NatIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class NatOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class NatLayer(nn.Module): def __init__(self, config, dim, num_heads, drop_path_rate=0.0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.kernel_size = config.kernel_size self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size) self.drop_path = NatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = NatIntermediate(config, 
dim) self.output = NatOutput(config, dim) def maybe_pad(self, hidden_states, height, width): window_size = self.kernel_size pad_values = (0, 0, 0, 0, 0, 0) if height < window_size or width < window_size: pad_l = pad_t = 0 pad_r = max(0, window_size - width) pad_b = max(0, window_size - height) pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, torch.Tensor]: batch_size, height, width, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) # pad hidden_states if they are smaller than kernel size hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape attention_outputs = self.attention(hidden_states, output_attentions=output_attentions) attention_output = attention_outputs[0] was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_output = attention_output[:, :height, :width, :].contiguous() hidden_states = shortcut + self.drop_path(attention_output) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.output(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs class NatStage(nn.Module): def __init__(self, config, dim, depth, num_heads, drop_path_rate, downsample): super().__init__() self.config = config self.dim = dim self.layers = nn.ModuleList( [ NatLayer( config=config, dim=dim, num_heads=num_heads, drop_path_rate=drop_path_rate[i], ) for i in range(depth) ] ) # patch merging layer if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: _, height, width, _ = hidden_states.size() for i, layer_module in enumerate(self.layers): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] if self.downsample is not None: height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(layer_outputs[0]) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs class NatEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_levels = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] self.levels = nn.ModuleList( [ NatStage( config=config, dim=int(config.embed_dim * 2**i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path_rate=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=NatDownsampler if (i_layer < self.num_levels - 1) else None, ) for i_layer in range(self.num_levels) ] ) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, NatEncoderOutput]: all_hidden_states = () if output_hidden_states else None 
all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: # rearrange b h w c -> b c h w reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.levels): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] if output_hidden_states: # rearrange b h w c -> b c h w reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[2:] if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return NatEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states, ) class NatPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = NatConfig base_model_prefix = "nat" main_input_name = "pixel_values" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module: NatEncoder, value: bool = False) -> None: pass NAT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`NatConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ NAT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`AutoImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare Nat Model transformer outputting raw hidden-states without any specific head on top.", NAT_START_DOCSTRING, ) class NatModel(NatPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) requires_backends(self, ["natten"]) self.config = config self.num_levels = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1)) self.embeddings = NatEmbeddings(config) self.encoder = NatEncoder(config) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=NatModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, NatModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return NatModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, ) @add_start_docstrings( """ Nat Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. 
""", NAT_START_DOCSTRING, ) class NatForImageClassification(NatPreTrainedModel): def __init__(self, config): super().__init__(config) requires_backends(self, ["natten"]) self.num_labels = config.num_labels self.nat = NatModel(config) # Classifier head self.classifier = ( nn.Linear(self.nat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=NatImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, NatImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nat( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return NatImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states, )
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always created on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always created on `cpu`. We need to put it on the proper device.
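A minimal sketch of the kind of change this description asks for, assuming DETR-style post-processing where `out_logits` carries the model's device; the helper name `build_scale_fct` and the exact `.to(...)` placement are illustrative assumptions, not a quote of the merged diff.
```python
import torch
from typing import List

def build_scale_fct(target_sizes, out_logits: torch.Tensor) -> torch.Tensor:
    # when target_sizes is a plain Python list, these tensors are created on the CPU by default
    if isinstance(target_sizes, List):
        img_h = torch.Tensor([i[0] for i in target_sizes])
        img_w = torch.Tensor([i[1] for i in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)
    # move the scale factors onto the same device as the model outputs before using them
    return torch.stack([img_w, img_h, img_w, img_h], dim=1).to(out_logits.device)

# hypothetical usage with CPU logits; with CUDA logits the result would follow them to the GPU
logits = torch.randn(2, 100, 92)
scale_fct = build_scale_fct([(480, 640), (320, 320)], logits)
print(scale_fct.device)
```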
./examples/research_projects/jax-projects/big_bird/evaluate.py
from datasets import load_from_disk import jax import jax.numpy as jnp from bigbird_flax import FlaxBigBirdForNaturalQuestions from transformers import BigBirdTokenizerFast CATEGORY_MAPPING = {0: "null", 1: "short", 2: "long", 3: "yes", 4: "no"} PUNCTUATION_SET_TO_EXCLUDE = set("".join(["‘", "’", "´", "`", ".", ",", "-", '"'])) def get_sub_answers(answers, begin=0, end=None): return [" ".join(x.split(" ")[begin:end]) for x in answers if len(x.split(" ")) > 1] def expand_to_aliases(given_answers, make_sub_answers=False): if make_sub_answers: # if answers are longer than one word, make sure a predictions is correct if it coresponds to the complete 1: or :-1 sub word # *e.g.* if the correct answer contains a prefix such as "the", or "a" given_answers = ( given_answers + get_sub_answers(given_answers, begin=1) + get_sub_answers(given_answers, end=-1) ) answers = [] for answer in given_answers: alias = answer.replace("_", " ").lower() alias = "".join(c if c not in PUNCTUATION_SET_TO_EXCLUDE else " " for c in alias) answers.append(" ".join(alias.split()).strip()) return set(answers) def get_best_valid_start_end_idx(start_scores, end_scores, top_k=1, max_size=100): best_start_scores, best_start_idx = jax.lax.top_k(start_scores, top_k) best_end_scores, best_end_idx = jax.lax.top_k(end_scores, top_k) widths = best_end_idx[:, None] - best_start_idx[None, :] mask = jnp.logical_or(widths < 0, widths > max_size) scores = (best_end_scores[:, None] + best_start_scores[None, :]) - (1e8 * mask) best_score = jnp.argmax(scores).item() return best_start_idx[best_score % top_k], best_end_idx[best_score // top_k] def format_dataset(sample): question = sample["question"]["text"] context = sample["document"]["tokens"]["token"] is_html = sample["document"]["tokens"]["is_html"] long_answers = sample["annotations"]["long_answer"] short_answers = sample["annotations"]["short_answers"] context_string = " ".join([context[i] for i in range(len(context)) if not is_html[i]]) # 0 - No ; 1 - Yes for answer in sample["annotations"]["yes_no_answer"]: if answer == 0 or answer == 1: return { "question": question, "context": context_string, "short": [], "long": [], "category": "no" if answer == 0 else "yes", } short_targets = [] for s in short_answers: short_targets.extend(s["text"]) short_targets = list(set(short_targets)) long_targets = [] for s in long_answers: if s["start_token"] == -1: continue answer = context[s["start_token"] : s["end_token"]] html = is_html[s["start_token"] : s["end_token"]] new_answer = " ".join([answer[i] for i in range(len(answer)) if not html[i]]) if new_answer not in long_targets: long_targets.append(new_answer) category = "long_short" if len(short_targets + long_targets) > 0 else "null" return { "question": question, "context": context_string, "short": short_targets, "long": long_targets, "category": category, } def main(): dataset = load_from_disk("natural-questions-validation") dataset = dataset.map(format_dataset).remove_columns(["annotations", "document", "id"]) print(dataset) short_validation_dataset = dataset.filter(lambda x: (len(x["question"]) + len(x["context"])) < 4 * 4096) short_validation_dataset = short_validation_dataset.filter(lambda x: x["category"] != "null") short_validation_dataset model_id = "vasudevgupta/flax-bigbird-natural-questions" model = FlaxBigBirdForNaturalQuestions.from_pretrained(model_id) tokenizer = BigBirdTokenizerFast.from_pretrained(model_id) @jax.jit def forward(*args, **kwargs): start_logits, end_logits, pooled_logits = model(*args, **kwargs) return 
start_logits, end_logits, jnp.argmax(pooled_logits, axis=-1) def evaluate(example): # encode question and context so that they are separated by a tokenizer.sep_token and cut at max_length inputs = tokenizer( example["question"], example["context"], return_tensors="np", max_length=4096, padding="max_length", truncation=True, ) start_scores, end_scores, category = forward(**inputs) predicted_category = CATEGORY_MAPPING[category.item()] example["targets"] = example["long"] + example["short"] if example["category"] in ["yes", "no", "null"]: example["targets"] = [example["category"]] example["has_tgt"] = example["category"] != "null" # Now target can be: "yes", "no", "null", "list of long & short answers" if predicted_category in ["yes", "no", "null"]: example["output"] = [predicted_category] example["match"] = example["output"] == example["targets"] example["has_pred"] = predicted_category != "null" return example max_size = 38 if predicted_category == "short" else 1024 start_score, end_score = get_best_valid_start_end_idx( start_scores[0], end_scores[0], top_k=8, max_size=max_size ) input_ids = inputs["input_ids"][0].tolist() example["output"] = [tokenizer.decode(input_ids[start_score : end_score + 1])] answers = expand_to_aliases(example["targets"], make_sub_answers=True) predictions = expand_to_aliases(example["output"]) # some preprocessing to both prediction and answer answers = set(["".join(a.split()) for a in answers]) predictions = set(["".join(p.split()) for p in predictions]) predictions = set([s for s in predictions if s not in ["``", "''", "`", "'"]]) # if there is a common element, it's a exact match example["match"] = len(list(answers & predictions)) > 0 example["has_pred"] = predicted_category != "null" and len(predictions) > 0 return example short_validation_dataset = short_validation_dataset.map(evaluate) total = len(short_validation_dataset) matched = len(short_validation_dataset.filter(lambda x: x["match"] == 1)) print("EM score:", (matched / total) * 100, "%") if __name__ == "__main__": main()
from datasets import load_from_disk import jax import jax.numpy as jnp from bigbird_flax import FlaxBigBirdForNaturalQuestions from transformers import BigBirdTokenizerFast CATEGORY_MAPPING = {0: "null", 1: "short", 2: "long", 3: "yes", 4: "no"} PUNCTUATION_SET_TO_EXCLUDE = set("".join(["‘", "’", "´", "`", ".", ",", "-", '"'])) def get_sub_answers(answers, begin=0, end=None): return [" ".join(x.split(" ")[begin:end]) for x in answers if len(x.split(" ")) > 1] def expand_to_aliases(given_answers, make_sub_answers=False): if make_sub_answers: # if answers are longer than one word, make sure a predictions is correct if it coresponds to the complete 1: or :-1 sub word # *e.g.* if the correct answer contains a prefix such as "the", or "a" given_answers = ( given_answers + get_sub_answers(given_answers, begin=1) + get_sub_answers(given_answers, end=-1) ) answers = [] for answer in given_answers: alias = answer.replace("_", " ").lower() alias = "".join(c if c not in PUNCTUATION_SET_TO_EXCLUDE else " " for c in alias) answers.append(" ".join(alias.split()).strip()) return set(answers) def get_best_valid_start_end_idx(start_scores, end_scores, top_k=1, max_size=100): best_start_scores, best_start_idx = jax.lax.top_k(start_scores, top_k) best_end_scores, best_end_idx = jax.lax.top_k(end_scores, top_k) widths = best_end_idx[:, None] - best_start_idx[None, :] mask = jnp.logical_or(widths < 0, widths > max_size) scores = (best_end_scores[:, None] + best_start_scores[None, :]) - (1e8 * mask) best_score = jnp.argmax(scores).item() return best_start_idx[best_score % top_k], best_end_idx[best_score // top_k] def format_dataset(sample): question = sample["question"]["text"] context = sample["document"]["tokens"]["token"] is_html = sample["document"]["tokens"]["is_html"] long_answers = sample["annotations"]["long_answer"] short_answers = sample["annotations"]["short_answers"] context_string = " ".join([context[i] for i in range(len(context)) if not is_html[i]]) # 0 - No ; 1 - Yes for answer in sample["annotations"]["yes_no_answer"]: if answer == 0 or answer == 1: return { "question": question, "context": context_string, "short": [], "long": [], "category": "no" if answer == 0 else "yes", } short_targets = [] for s in short_answers: short_targets.extend(s["text"]) short_targets = list(set(short_targets)) long_targets = [] for s in long_answers: if s["start_token"] == -1: continue answer = context[s["start_token"] : s["end_token"]] html = is_html[s["start_token"] : s["end_token"]] new_answer = " ".join([answer[i] for i in range(len(answer)) if not html[i]]) if new_answer not in long_targets: long_targets.append(new_answer) category = "long_short" if len(short_targets + long_targets) > 0 else "null" return { "question": question, "context": context_string, "short": short_targets, "long": long_targets, "category": category, } def main(): dataset = load_from_disk("natural-questions-validation") dataset = dataset.map(format_dataset).remove_columns(["annotations", "document", "id"]) print(dataset) short_validation_dataset = dataset.filter(lambda x: (len(x["question"]) + len(x["context"])) < 4 * 4096) short_validation_dataset = short_validation_dataset.filter(lambda x: x["category"] != "null") short_validation_dataset model_id = "vasudevgupta/flax-bigbird-natural-questions" model = FlaxBigBirdForNaturalQuestions.from_pretrained(model_id) tokenizer = BigBirdTokenizerFast.from_pretrained(model_id) @jax.jit def forward(*args, **kwargs): start_logits, end_logits, pooled_logits = model(*args, **kwargs) return 
start_logits, end_logits, jnp.argmax(pooled_logits, axis=-1) def evaluate(example): # encode question and context so that they are separated by a tokenizer.sep_token and cut at max_length inputs = tokenizer( example["question"], example["context"], return_tensors="np", max_length=4096, padding="max_length", truncation=True, ) start_scores, end_scores, category = forward(**inputs) predicted_category = CATEGORY_MAPPING[category.item()] example["targets"] = example["long"] + example["short"] if example["category"] in ["yes", "no", "null"]: example["targets"] = [example["category"]] example["has_tgt"] = example["category"] != "null" # Now target can be: "yes", "no", "null", "list of long & short answers" if predicted_category in ["yes", "no", "null"]: example["output"] = [predicted_category] example["match"] = example["output"] == example["targets"] example["has_pred"] = predicted_category != "null" return example max_size = 38 if predicted_category == "short" else 1024 start_score, end_score = get_best_valid_start_end_idx( start_scores[0], end_scores[0], top_k=8, max_size=max_size ) input_ids = inputs["input_ids"][0].tolist() example["output"] = [tokenizer.decode(input_ids[start_score : end_score + 1])] answers = expand_to_aliases(example["targets"], make_sub_answers=True) predictions = expand_to_aliases(example["output"]) # some preprocessing to both prediction and answer answers = set(["".join(a.split()) for a in answers]) predictions = set(["".join(p.split()) for p in predictions]) predictions = set([s for s in predictions if s not in ["``", "''", "`", "'"]]) # if there is a common element, it's a exact match example["match"] = len(list(answers & predictions)) > 0 example["has_pred"] = predicted_category != "null" and len(predictions) > 0 return example short_validation_dataset = short_validation_dataset.map(evaluate) total = len(short_validation_dataset) matched = len(short_validation_dataset.filter(lambda x: x["match"] == 1)) print("EM score:", (matched / total) * 100, "%") if __name__ == "__main__": main()
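The span selection in `get_best_valid_start_end_idx` above can be illustrated without JAX: keep the top-k start and end candidates, mask out pairs that end before they start or exceed `max_size`, and take the highest-scoring remaining pair. The NumPy re-implementation below is only an illustration of that logic, with toy inputs.
```python
import numpy as np

def best_valid_span(start_scores, end_scores, top_k=8, max_size=38):
    # indices of the top_k highest-scoring start/end positions
    start_idx = np.argsort(start_scores)[-top_k:]
    end_idx = np.argsort(end_scores)[-top_k:]
    # widths[i, j]: length of the span built from end_idx[i] and start_idx[j]
    widths = end_idx[:, None] - start_idx[None, :]
    invalid = (widths < 0) | (widths > max_size)
    # score every (end, start) pair, discarding invalid ones
    scores = end_scores[end_idx][:, None] + start_scores[start_idx][None, :]
    scores = np.where(invalid, -np.inf, scores)
    flat = int(np.argmax(scores))
    return int(start_idx[flat % top_k]), int(end_idx[flat // top_k])

# toy inputs: random scores over a 20-token context
rng = np.random.default_rng(0)
start, end = rng.normal(size=20), rng.normal(size=20)
print(best_valid_span(start, end, top_k=4, max_size=5))
```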
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always created on `cpu`. We need to put it on the proper device.
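To see why the quoted block leaves `scale_fct` on the CPU, here is a hypothetical snippet (not taken from the PR) that reproduces the symptom.
```python
import torch

# the block quoted above builds the image sizes on the default (CPU) device
target_sizes = [(480, 640), (320, 320)]
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
print(scale_fct.device)  # cpu, even when the model's outputs live on a GPU
```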
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always created on `cpu`. We need to put it on the proper device.
./tests/generation/test_utils.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ..test_modeling_common import floats_tensor, ids_tensor if is_torch_available(): import torch from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, BartForConditionalGeneration, BartTokenizer, GPT2LMHeadModel, GPT2Tokenizer, ImageGPTForCausalImageModeling, Speech2TextForConditionalGeneration, SpeechEncoderDecoderModel, T5ForConditionalGeneration, VisionEncoderDecoderModel, pipeline, top_k_top_p_filtering, ) from transformers.generation import ( BeamSampleDecoderOnlyOutput, BeamSampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput, BeamSearchEncoderDecoderOutput, BeamSearchScorer, ConstrainedBeamSearchScorer, DisjunctiveConstraint, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, GreedySearchDecoderOnlyOutput, GreedySearchEncoderDecoderOutput, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitsProcessorList, MaxLengthCriteria, MinLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PhrasalConstraint, RepetitionPenaltyLogitsProcessor, SampleDecoderOnlyOutput, SampleEncoderDecoderOutput, StoppingCriteria, StoppingCriteriaList, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ) class GenerationTesterMixin: model_tester = None all_generative_model_classes = () input_name = "input_ids" def _get_input_ids_and_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size 3 max_batch_size = 2 sequence_length = input_ids.shape[-1] // 2 input_ids = input_ids[:max_batch_size, :sequence_length] # generate max 3 tokens max_length = input_ids.shape[-1] + 3 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id # TransfoXL has no attention mask if "transfoxl" in config.__class__.__name__.lower(): attention_mask = None else: attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:max_batch_size, :sequence_length] return config, input_ids, attention_mask, max_length @staticmethod def _get_logits_processor_and_kwargs( input_length, eos_token_id, forced_bos_token_id=None, forced_eos_token_id=None, max_length=None, diversity_penalty=None, ): process_kwargs = { "min_length": input_length + 1 if max_length is None else max_length - 1, "bad_words_ids": [[1, 0]], "no_repeat_ngram_size": 2, "repetition_penalty": 1.2, } logits_processor = LogitsProcessorList( ( [ HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2), ] if diversity_penalty is not None else [] ) + ( [ MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id), ] if eos_token_id is not None else [] ) + ( [ 
ForcedBOSTokenLogitsProcessor(forced_bos_token_id), ] if forced_bos_token_id is not None else [] ) + ( [ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)] if forced_eos_token_id is not None else [] ) + [ NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id), NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"]), RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"]), ] ) return process_kwargs, logits_processor @staticmethod def _get_warper_and_kwargs(num_beams): warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7} logits_warper = LogitsProcessorList( [ TemperatureLogitsWarper(warp_kwargs["temperature"]), TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), ] ) return warp_kwargs, logits_warper @staticmethod def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, "num_beam_groups": 2, # one beam per group "diversity_penalty": 2.0, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=beam_kwargs["num_beam_groups"], ) return beam_kwargs, beam_scorer @staticmethod def _get_constrained_beam_scorer_and_kwargs(batch_size, max_length, constraints, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": num_return_sequences * 4, "num_return_sequences": num_return_sequences, } beam_scorer = ConstrainedBeamSearchScorer( batch_size=batch_size, constraints=constraints, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id() attention_mask = None return encoder_outputs, input_ids, attention_mask def _greedy_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = 
self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_greedy = model.greedy_search( input_ids, max_length=max_length, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_greedy, output_generate def _sample_generate( self, model, input_ids, attention_mask, max_length, num_return_sequences, logits_processor, logits_warper, logits_warper_kwargs, process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, num_beams=1, max_length=max_length, num_return_sequences=num_return_sequences, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_warper_kwargs, **process_kwargs, **model_kwargs, ) torch.manual_seed(0) kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0) # prevent flaky generation test failures logits_processor.append(InfNanRemoveLogitsProcessor()) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_sample = model.sample( input_ids.repeat_interleave(num_return_sequences, dim=0), max_length=max_length, logits_processor=logits_processor, logits_warper=logits_warper, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_sample, output_generate def _beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = 
model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_search = model.beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_search def _beam_sample_generate( self, model, input_ids, attention_mask, max_length, num_return_sequences, beam_scorer, beam_kwargs, logits_warper, logits_warper_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_warper_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams * num_return_sequences` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams * num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0) # prevent flaky generation test failures logits_processor = LogitsProcessorList() logits_processor.append(InfNanRemoveLogitsProcessor()) torch.manual_seed(0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_sample = model.beam_sample( input_ids.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0), beam_scorer, max_length=max_length, logits_warper=logits_warper, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_sample def _group_beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, 
output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.group_beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _constrained_beam_search_generate( self, model, input_ids, attention_mask, max_length, constrained_beam_scorer, constraints, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, constraints=constraints, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=constrained_beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.constrained_beam_search( input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0), constrained_beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _contrastive_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): 
contrastive_search_kwargs = { "penalty_alpha": 0.6, "top_k": 5, } if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, **contrastive_search_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)]) output_contrastive = model.contrastive_search( input_ids, stopping_criteria=stopping_criteria, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, **contrastive_search_kwargs, ) return output_contrastive, output_generate def test_greedy_generate(self): # check `generate()` and `greedy_search()` are equal for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_greedy.tolist(), output_generate.tolist()) def test_greedy_generate_dict_outputs(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput) self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput) else: self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput) self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config) def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() if not hasattr(config, "use_cache"): # only relevant if model has 
"use_cache" return config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) # check `generate()` and `sample()` are equal output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=1, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) # check `generate()` and `sample()` yield equal results for `num_return_sequences` output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=3, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) def test_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=2, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_sample, SampleEncoderDecoderOutput) self.assertIsInstance(output_generate, SampleEncoderDecoderOutput) else: self.assertIsInstance(output_sample, SampleDecoderOnlyOutput) self.assertIsInstance(output_generate, SampleDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist()) for output in (output_sample, 
output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=2) def test_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) # check `generate()` and `beam_search()` are equal output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) # check `generate()` and `beam_search()` are equal for `num_return_sequences` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) def test_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: 
self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_beam_search_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None if not hasattr(config, "use_cache"): # only relevant if model has "use_cache" return model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_beam, output_generate = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist()) for output in (output_beam, output_generate): self._check_outputs( output, input_ids, model.config, use_cache=True, num_return_sequences=beam_scorer.num_beams ) def test_beam_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) model = model_class(config).to(torch_device).eval() # check `generate()` and `beam_search()` are equal # change `num_return_sequences = 2` but not for `beam_scorer` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs( input_ids.shape[0] * num_return_sequences, max_length ) beam_kwargs["num_return_sequences"] = num_return_sequences output_generate, output_beam_sample = self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, 
num_return_sequences=num_return_sequences, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist()) def test_beam_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important to set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs( input_ids.shape[0] * num_return_sequences, max_length ) beam_kwargs["num_return_sequences"] = num_return_sequences output_beam_sample, output_generate = self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=num_return_sequences, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput) else: self.assertIsInstance(output_beam_sample, BeamSampleDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_sample, output_generate): self._check_outputs( output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams ) def test_generate_without_input_ids(self): config, _, _, max_length = self._get_input_ids_and_config() # if no bos token id => cannot generate from None if config.bos_token_id is None: return for model_class in self.all_generative_model_classes: model = model_class(config).to(torch_device) model.eval() output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True) self.assertIsNotNone(output_ids_generate) def test_group_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important to set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs,
logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) # check `generate()` and `group_beam_search()` are equal beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) # check `generate()` and `group_beam_search()` are equal for `num_return_sequences` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) def test_group_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False # It is important to set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) num_return_sequences = 1 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist()) self.assertTrue( torch.allclose( output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3 ) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"]
< 0).all().item()) for output in (output_group_beam_search, output_generate): self._check_outputs( output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams ) def test_constrained_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important to set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # check `generate()` and `constrained_beam_search()` are equal # Sample constraints if not input_ids.dtype == torch.float32: min_id = torch.min(input_ids) + 3 max_id = torch.max(input_ids) else: # otherwise this throws an error for Speech2TextModel since its inputs are floating points min_id = 3 max_id = 100 force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) # check `generate()` and `constrained_beam_search()` are equal for `num_return_sequences` # Sample constraints force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] num_return_sequences = 2 max_length = 20 beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=num_return_sequences ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) def test_constrained_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important to set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model =
model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # Sample constraints if not input_ids.dtype == torch.float32: min_id = torch.min(input_ids) + 3 max_id = torch.max(input_ids) else: # otherwise this throws an error for Speech2TextModel since its inputs are floating points min_id = 3 max_id = 100 force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_contrastive_generate(self): # check `generate()` and `contrastive_search()` are equal for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): return config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): return config.use_cache = True config.is_decoder = True # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_contrastive.tolist(), output_generate.tolist()) def test_contrastive_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): return # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. 
if not hasattr(config, "use_cache"): return config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_contrastive.sequences.tolist()) for output in (output_contrastive, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_generate_with_head_masking(self): """Test designed for encoder-decoder models to ensure the attention head masking is used.""" attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device) # We want to test only encoder-decoder models if not config.is_encoder_decoder: continue head_masking = { "head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device), "decoder_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), "cross_attn_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), } signature = inspect.signature(model.forward) # We want to test only models where encoder/decoder head masking is implemented if not set(head_masking.keys()) < set([*signature.parameters.keys()]): continue for attn_name, (name, mask) in zip(attention_names, head_masking.items()): out = model.generate( input_ids, attention_mask=attention_mask, num_beams=1, output_attentions=True, return_dict_in_generate=True, remove_invalid_values=True, **{name: mask}, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions if config.is_encoder_decoder: # encoder self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no use_cache, so skip here attentions = output.attentions if not use_cache else output.attentions[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_attentions_for_generate( num_sequences_in_output, attentions=attentions, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States if config.is_encoder_decoder: # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, 
output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no use_cache, so skip here hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_hidden_states_for_generate( num_sequences_in_output, hidden_states, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): tgt_len = min_length + idx if not use_cache else 1 src_len = min_length + idx expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): seq_len = min_length + idx if not use_cache else 1 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length): encoder_expected_shape = (batch_size, seq_length, config.hidden_size) self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in hidden_states], [encoder_expected_shape] * len(hidden_states), ) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. 
if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break self.assertTrue(flag) @require_torch class UtilsFunctionsTest(unittest.TestCase): # tests whether the top_k_top_p function behaves as expected def test_top_k_top_p_filtering(self): logits = torch.tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 4 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 27 -5.89478553, 1.85370467, ], # cummulative prob of 4 highest values <= 0.6 ], dtype=torch.float, device=torch_device, ) non_inf_expected_idx = torch.tensor( [[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]], dtype=torch.long, device=torch_device, ) # expected non filtered idx as noted above non_inf_expected_output = torch.tensor( [ 8.2221, 8.4321, 7.4402, 9.3845, 6.2712, 8.8275, 7.3858, 9.6770, ], # expected non filtered values as noted above dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) non_inf_output = output[output != -float("inf")].to(device=torch_device) non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device) self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12)) self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx))) # tests whether the function uses filter_value instead of default -inf def test_top_k_top_p_filtering_with_filter_value(self): logits = torch.tensor( [ [ 1, 1, 1, 0.99, # get filtered by top-p filtering 0.98, # get filtered by top-k filtering ] ], dtype=torch.float, device=torch_device, ) expected_output = torch.tensor( [[1, 1, 1, 0, 0]], dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=4, top_p=0.5, filter_value=0.0) self.assertTrue(torch.allclose(expected_output, output, atol=1e-12)) @require_torch class GenerationIntegrationTests(unittest.TestCase): @slow def test_diverse_beam_search(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood. The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People. 
"Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports. The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both.""" bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = bart_model.generate( input_ids, num_beams=4, num_return_sequences=2, num_beam_groups=4, diversity_penalty=2.0, remove_invalid_values=True, ) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the" " middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle" " name, as well as his father's first. It is the first baby for both of them.", "Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the" " first child for both. The couple announced the pregnancy in January. The name Silas is the middle" " name of Timberlake's maternal grandfather. It's also his own middle name.", ], ) def test_max_length_backward_compat_greedy(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_sample(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with torch.no_grad(): with self.assertWarns(UserWarning): bart_model.sample( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_beam_search(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = 
BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 2 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): _ = bart_model.beam_search( input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs ) def test_max_length_backward_compat_group_beam_search(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs ) def test_max_length_warning_if_different(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size stopping_criteria_max_length = 18 stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)]) # Greedy input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, stopping_criteria=stopping_criteria, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Sample with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.sample( input_ids, max_length=max_length, stopping_criteria=stopping_criteria, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Beam beam_scorer = BeamSearchScorer( 
batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.beam_search( input_ids, num_beams=num_beams, stopping_criteria=stopping_criteria, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs, ) # Grouped beam search diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, stopping_criteria=stopping_criteria, num_beams=num_beams, max_length=max_length, **model_kwargs, ) def test_beam_search_warning_if_max_length_is_passed(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) batch_size = 1 num_beams = 3 input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) input_ids = input_ids.expand(num_beams, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) # pretend decoder_input_ids correspond to first encoder input id decoder_input_ids = input_ids[:, :1] stopping_criteria_max_length = 18 stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)]) with self.assertWarns(UserWarning): beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, max_length=10, ) generated_ids = bart_model.beam_search( decoder_input_ids, num_beams=num_beams, stopping_criteria=stopping_criteria, beam_scorer=beam_scorer, **model_kwargs, ) beam_scorer_no_max_len = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) generated_ids_no_max_len = bart_model.beam_search( decoder_input_ids, num_beams=num_beams, stopping_criteria=stopping_criteria, beam_scorer=beam_scorer_no_max_len, **model_kwargs, ) # BeamSearchScorer max_length should not influence "real" max_length self.assertEqual(generated_ids.tolist(), generated_ids_no_max_len.tolist()) def test_custom_stopping_criteria_overload_error(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) stopping_criteria = StoppingCriteriaList() stopping_criteria.append(MaxLengthCriteria(max_length=42)) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32) def test_custom_stopping_criteria(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) class DummyCriteria(StoppingCriteria): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: return input_ids.shape[-1] >= 20 stopping_criteria = 
StoppingCriteriaList() stopping_criteria.append(DummyCriteria()) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape), [1, 20], ) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape), [1, 18], ) def test_stop_sequence_stopping_criteria(self): prompt = """Hello I believe in""" generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-bart") output = generator(prompt) self.assertEqual( output, [ { "generated_text": ( "Hello I believe in in in number number number number number number number number number" ) } ], ) output = generator(prompt, stop_sequence=" number") self.assertEqual(output, [{"generated_text": "Hello I believe in in in number"}]) def test_custom_logits_processor(self): bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random", min_length=1).to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) logits_processor = LogitsProcessorList() logits_processor.append(MinLengthLogitsProcessor(min_length=10, eos_token_id=0)) # it should not be allowed to both define `min_length` via config and `logits_processor` list with self.assertRaises(ValueError): bart_model.generate(input_ids, logits_processor=logits_processor) bart_model.config.min_length = None bart_model.generate(input_ids, logits_processor=logits_processor) def test_max_new_tokens_encoder_decoder(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 29]) max_new_tokens = 3 bart_model.config.max_length = 20 bart_model.config.eos_token_id = None # Encoder decoder call outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens) # 1 BOS + 3 new tokens self.assertEqual(list(outputs.shape), [1, 4]) # Decoder only call outputs = bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=max_new_tokens) # 29 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 32]) # Encoder decoder call > 20 outputs = bart_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS + 20 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. 
with self.assertRaises(ValueError): bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=10, max_length=20) def test_max_new_tokens_decoder_only_contrastive_search_t5(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" t5_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") t5_model = T5ForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-t5").to(torch_device) input_ids = t5_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 56]) max_new_tokens = 3 t5_model.config.max_length = 20 t5_model.config.eos_token_id = None # Encoder decoder call outputs = t5_model.generate(input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4) # 1 BOS + 3 new tokens self.assertEqual(list(outputs.shape), [1, 4]) # Decoder only call outputs = t5_model.generate( decoder_input_ids=input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4 ) # 56 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 59]) # Encoder decoder call > 20 outputs = t5_model.generate(max_new_tokens=max_new_tokens + 20, penalty_alpha=0.6, top_k=4) # 1 BOS + 20 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. with self.assertRaises(ValueError): t5_model.generate( decoder_input_ids=input_ids, max_new_tokens=10, max_length=20, penalty_alpha=0.6, top_k=4 ) def test_max_new_tokens_decoder_only_contrastive_search_bart(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 29]) max_new_tokens = 3 bart_model.config.max_length = 20 bart_model.config.eos_token_id = None # Encoder decoder call outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4) # 1 BOS + 3 new tokens self.assertEqual(list(outputs.shape), [1, 4]) # Decoder only call outputs = bart_model.generate( decoder_input_ids=input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4 ) # 29 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 32]) # Encoder decoder call > 20 outputs = bart_model.generate(max_new_tokens=max_new_tokens + 20, penalty_alpha=0.6, top_k=4) # 1 BOS + 20 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. 
with self.assertRaises(ValueError): bart_model.generate( decoder_input_ids=input_ids, max_new_tokens=10, max_length=20, penalty_alpha=0.6, top_k=4 ) def test_max_new_tokens_decoder_only_contrastive_search_gptj(self): article = """Justin Timberlake.""" gptj_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gptj") gptj_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gptj").to(torch_device) input_ids = gptj_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 9]) max_new_tokens = 3 gptj_model.config.max_length = 20 # call < 20 outputs = gptj_model.generate(input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4) # 9 input_ids + 3 new tokens self.assertEqual(list(outputs.shape), [1, 12]) # call > 20 outputs = gptj_model.generate(max_new_tokens=max_new_tokens + 20, penalty_alpha=0.6, top_k=4) # 1 BOS token + 23 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. with self.assertRaises(ValueError): gptj_model.generate(input_ids=input_ids, max_new_tokens=10, max_length=20, penalty_alpha=0.6, top_k=4) def test_max_new_tokens_decoder_only_contrastive_search_gpt2(self): article = """Justin Timberlake.""" gpt2_tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") gpt2_model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) input_ids = gpt2_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 9]) max_new_tokens = 3 gpt2_model.config.max_length = 20 # call < 20 outputs = gpt2_model.generate(input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4) # 9 input_ids + 3 new tokens self.assertEqual(list(outputs.shape), [1, 12]) # call > 20 outputs = gpt2_model.generate(max_new_tokens=max_new_tokens + 20, penalty_alpha=0.6, top_k=4) # 1 BOS token + 23 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. with self.assertRaises(ValueError): gpt2_model.generate(input_ids=input_ids, max_new_tokens=10, max_length=20, penalty_alpha=0.6, top_k=4) def test_max_new_tokens_decoder_only(self): article = """Justin Timberlake.""" gpt2_tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") gpt2_model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) input_ids = gpt2_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 9]) max_new_tokens = 3 gpt2_model.config.max_length = 20 # call < 20 outputs = gpt2_model.generate(input_ids, max_new_tokens=max_new_tokens) # 9 input_ids + 3 new tokens self.assertEqual(list(outputs.shape), [1, 12]) # call > 20 outputs = gpt2_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS token + 23 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. 
with self.assertRaises(ValueError): gpt2_model.generate(input_ids=input_ids, max_new_tokens=10, max_length=20) def test_encoder_decoder_generate_with_inputs_embeds(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5).to( torch_device ) model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) inputs_embeds = model.get_input_embeddings()(input_ids) output_sequences = model.generate(inputs_embeds=inputs_embeds) # make sure model generated correctly until `max_length` self.assertEqual(output_sequences.shape, (1, 5)) def test_encoder_decoder_generate_attention_mask(self): articles = ["Timberlake", "Jessica Biel, welcome to parenthood among other things"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") # need extreme generation values here to force this test # to fail when `attention_mask` is not correctly treated in generate model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=50, num_beams=5, num_return_sequences=5 ).to(torch_device) model.config.eos_token_id = None input_ids = tokenizer(articles[0], return_tensors="pt").input_ids.to(torch_device) input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate(input_ids=input_ids, return_dict_in_generate=True, output_scores=True) batched_out = output_sequences_batched.sequences_scores out = output_sequences.sequences_scores diff = (batched_out[:5].sum() - out.sum()).abs() self.assertTrue(diff < 1e-4) def test_decoder_generate_with_inputs_embeds(self): article = """I need input_ids to generate""" tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=5).to(torch_device) input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) inputs_embeds = model.get_input_embeddings()(input_ids) # cannot generate from `inputs_embeds` for decoder only with self.assertRaises(ValueError): model.generate(inputs_embeds=inputs_embeds) def test_generate_input_ids_as_kwarg(self): article = """I need input_ids to generate""" tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=15).to(torch_device) input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids).cpu() output_sequences = model.generate(input_ids).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (1, 15)) def test_generate_non_nlp_input_ids_as_kwarg(self): model = ImageGPTForCausalImageModeling.from_pretrained( "hf-internal-testing/tiny-random-imagegpt", max_length=10 ).to(torch_device) input_ids = ids_tensor((3, 5), vocab_size=10) output_sequences_kwargs = model.generate(input_ids=input_ids).cpu() output_sequences = model.generate(input_ids).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (3, 10)) def test_generate_input_ids_as_encoder_kwarg(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5).to( torch_device ) model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids).cpu() output_sequences = model.generate(input_ids).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (1, 5)) def test_generate_inputs_and_encoder_kwargs(self): article = """I need input_ids to generate""" tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10).to(torch_device) input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) with self.assertRaises(ValueError): model.generate(input_ids, input_ids=input_ids) def test_generate_too_many_encoder_kwargs(self): article = """I need input_ids to generate""" tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10).to(torch_device) input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) with self.assertRaises(ValueError): model.generate(input_ids=input_ids, inputs_embeds=input_ids) def test_generate_input_values_as_encoder_kwarg(self): input_values = floats_tensor((2, 250)) model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder") model = model.to(torch_device) output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu() output_sequences = model.generate(input_values, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (2, 5)) def test_generate_input_features_as_encoder_kwarg(self): input_features = floats_tensor((3, 20, 24)) model = Speech2TextForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-speech_to_text") model = model.to(torch_device) output_sequences_kwargs = model.generate(input_features=input_features, max_length=5).cpu() output_sequences = model.generate(input_features, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (3, 5)) def test_generate_pixel_values_as_encoder_kwarg(self): pixel_values = floats_tensor((2, 3, 30, 30)) model = VisionEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-vision-encoder-decoder") model = model.to(torch_device) output_sequences_kwargs = model.generate(pixel_values=pixel_values, max_length=5).cpu() output_sequences = model.generate(pixel_values, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (2, 5)) def test_generate_encoder_outputs_attention_mask(self): input_values = floats_tensor((2, 250)).to(torch_device) attention_mask = torch.ones_like(input_values) model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder") model = model.to(torch_device) encoder = 
model.get_encoder() encoder_outputs = encoder(input_values) output_sequences_no_mask = model.generate(encoder_outputs=encoder_outputs).cpu() output_sequences_with_mask = model.generate(encoder_outputs=encoder_outputs, attention_mask=attention_mask) output_sequences_with_mask = output_sequences_with_mask.cpu() self.assertListEqual(output_sequences_no_mask.tolist(), output_sequences_with_mask.tolist()) def test_transition_scores_beam_search_encoder_decoder(self): articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_beam_scores( outputs.sequences, outputs.scores, outputs.beam_indices ) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_search_encoder_decoder_with_eos(self): articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_beam_scores( outputs.sequences, outputs.scores, outputs.beam_indices ) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_search_decoder_only(self): articles = [ "Justin Timberlake", "Michael Phelps", ] tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer.pad_token = tokenizer.eos_token model = GPT2LMHeadModel.from_pretrained( "hf-internal-testing/tiny-random-gpt2", max_length=10, num_beams=4, num_return_sequences=2, pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_beam_scores( outputs.sequences, outputs.scores, outputs.beam_indices ) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_sample_encoder_decoder(self): articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = 
BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", do_sample=True, max_length=10, num_beams=4, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_beam_scores( outputs.sequences, outputs.scores, outputs.beam_indices ) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) def test_transition_scores_group_beam_search_encoder_decoder(self): articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=2, num_beam_groups=2, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_beam_scores( outputs.sequences, outputs.scores, outputs.beam_indices ) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) @slow def test_transition_scores_early_stopping(self): # This is an aggressive test that makes sure that `beam_search's` # transition scores are computed correctly for varying `num_return_sequences`, # `num_beams` and `batch_size > 1` # 2 x input_ids for "question: How are you? 
\n context: I had a long day, " input_ids = torch.tensor(2 * [[822, 10, 571, 33, 25, 58, 2625, 10, 27, 141, 3, 9, 307, 239, 6, 1]]).to( torch_device ) model = AutoModelForSeq2SeqLM.from_pretrained("t5-small").to(torch_device) result = model.generate( input_ids, max_length=10, return_dict_in_generate=True, output_scores=True, forced_eos_token_id=model.config.eos_token_id, num_beams=4, do_sample=False, num_return_sequences=3, length_penalty=0.0, ) transition_scores = model.compute_transition_beam_scores( sequences=result.sequences, scores=result.scores, beam_indices=result.beam_indices ) sum_transition_scores = torch.sum(transition_scores, dim=1) self.assertListEqual(sum_transition_scores.cpu().tolist(), result.sequences_scores.cpu().tolist()) def test_log_scores_sample_decoder_only(self): articles = ["I need input_ids to generate", "Short and"] tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer.padding_side = "left" tokenizer.pad_token = tokenizer.eos_token model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) inputs = tokenizer(articles, return_tensors="pt", padding=True).to(torch_device) result = model.generate( **inputs, max_length=15, return_dict_in_generate=True, do_sample=False, output_scores=True, ) # decoder-only starts generating from `input_ids` begin_generation = inputs.input_ids.shape[-1] gen_sequences = result.sequences[:, begin_generation:] probs = torch.stack(result.scores, dim=1).softmax(-1) gen_probs = torch.gather(probs, 2, gen_sequences[:, :, None]).squeeze(-1) expected_probs = torch.tensor([[0.0014, 0.0015], [0.0014, 0.0014]]) self.assertTrue(torch.allclose(gen_probs.cpu(), expected_probs, atol=1e-3)) def test_log_scores_sample_encoder_decoder(self): articles = ["I need input_ids to generate", "Short and"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) inputs = tokenizer(articles, return_tensors="pt", padding=True).to(torch_device) result = model.generate( **inputs, max_length=3, return_dict_in_generate=True, do_sample=False, num_beams=1, output_scores=True, ) # encoder-decoder has one decoder_start_token_id by default begin_generation = 1 gen_sequences = result.sequences[:, begin_generation:] probs = torch.stack(result.scores, dim=1).softmax(-1) gen_probs = torch.gather(probs, 2, gen_sequences[:, :, None]).squeeze(-1) expected_probs = torch.tensor([[0.0013, 1.0000], [0.0013, 1.0000]]) self.assertTrue(torch.allclose(gen_probs.cpu(), expected_probs, atol=1e-3)) @slow def test_beam_search_example_integration(self): # exactly the example provided in the docstrings of beam search, which previously # failed after directly copying from it. Refer to PR #15555 tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" 
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt bist du?"]) @slow def test_constrained_beam_search(self): model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_tokens = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids force_tokens_2 = tokenizer("big weapons", add_prefix_space=True, add_special_tokens=False).input_ids constraints = [ PhrasalConstraint(force_tokens), PhrasalConstraint(force_tokens_2), ] starting_text = ["The soldiers were not prepared and"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, max_length=30, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers were not prepared and didn't know what to do. 
They had no idea how they would react if" " the enemy attacked them, big weapons scared" ], ) @slow def test_constrained_beam_search_mixed(self): model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_phrase = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids flexible_phrases = tokenizer( ["scream", "screams", "screaming", "screamed"], add_prefix_space=True, add_special_tokens=False ).input_ids constraints = [ PhrasalConstraint(force_phrase), DisjunctiveConstraint(flexible_phrases), ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, # max_length=20, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_mixed_mixin(self): model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_word = "scared" force_flexible = ["scream", "screams", "screaming", "screamed"] force_words_ids = [ tokenizer([force_word], add_prefix_space=True, add_special_tokens=False).input_ids, tokenizer(force_flexible, add_prefix_space=True, add_special_tokens=False).input_ids, ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_example_translation_mixin(self): tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" force_words = ["sind"] input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) @slow def test_constrained_beam_search_example_integration(self): tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" 
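        # Same setup as the plain beam search example above, but a PhrasalConstraint on "sind" is added below.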
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 5 beams num_beams = 5 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } constraint_str = "sind" constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] # instantiate beam scorer beam_scorer = ConstrainedBeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.constrained_beam_search( input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) def test_constrained_beam_search_mixin_type_checks(self): tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/t5-tiny-random") model = AutoModelForSeq2SeqLM.from_pretrained("patrickvonplaten/t5-tiny-random") encoder_input_str = "translate English to German: How old are you?" input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = tokenizer(force_words, return_tensors="pt").input_ids model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = [tokenizer(force_words, return_tensors="pt").input_ids] model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[-1]]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[[-1]]]) def test_contrastive_search_batched(self): # Tests that contrastive search works with batched inputs (i.e. 
has the same output as for non-batched inputs) articles = ["Foo", "Bar Baz"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) model.config.eos_token_id = None input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device) input_ids = tokenizer(articles[1], return_tensors="pt").input_ids.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate( input_ids=input_ids, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) batched_out = tokenizer.decode(output_sequences_batched.sequences[1], skip_special_tokens=True) out = tokenizer.decode(output_sequences.sequences[0], skip_special_tokens=True) self.assertEqual(batched_out, out) # output_sequences_batched.scores[0][1] -> 1st set of logits, 2nd sequence max_score_diff = (output_sequences_batched.scores[0][1] - output_sequences.scores[0][0]).abs().max() self.assertTrue(max_score_diff < 1e-5) def test_validate_generation_inputs(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(ValueError, "do_samples"): model.generate(input_ids, do_samples=True) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(ValueError, "foo"): fake_model_kwargs = {"foo": "bar"} model.generate(input_ids, **fake_model_kwargs)
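        # Both checks above rely on `generate()` validating unused keyword arguments up front instead of silently ignoring them.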
# coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import inspect
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ..test_modeling_common import floats_tensor, ids_tensor


if is_torch_available():
    import torch

    from transformers import (
        AutoModelForCausalLM,
        AutoModelForSeq2SeqLM,
        AutoTokenizer,
        BartForConditionalGeneration,
        BartTokenizer,
        GPT2LMHeadModel,
        GPT2Tokenizer,
        ImageGPTForCausalImageModeling,
        Speech2TextForConditionalGeneration,
        SpeechEncoderDecoderModel,
        T5ForConditionalGeneration,
        VisionEncoderDecoderModel,
        pipeline,
        top_k_top_p_filtering,
    )
    from transformers.generation import (
        BeamSampleDecoderOnlyOutput,
        BeamSampleEncoderDecoderOutput,
        BeamSearchDecoderOnlyOutput,
        BeamSearchEncoderDecoderOutput,
        BeamSearchScorer,
        ConstrainedBeamSearchScorer,
        DisjunctiveConstraint,
        ForcedBOSTokenLogitsProcessor,
        ForcedEOSTokenLogitsProcessor,
        GreedySearchDecoderOnlyOutput,
        GreedySearchEncoderDecoderOutput,
        HammingDiversityLogitsProcessor,
        InfNanRemoveLogitsProcessor,
        LogitsProcessorList,
        MaxLengthCriteria,
        MinLengthLogitsProcessor,
        NoBadWordsLogitsProcessor,
        NoRepeatNGramLogitsProcessor,
        PhrasalConstraint,
        RepetitionPenaltyLogitsProcessor,
        SampleDecoderOnlyOutput,
        SampleEncoderDecoderOutput,
        StoppingCriteria,
        StoppingCriteriaList,
        TemperatureLogitsWarper,
        TopKLogitsWarper,
        TopPLogitsWarper,
    )


class GenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    input_name = "input_ids"

    def _get_input_ids_and_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict[self.input_name]

        # cut to half length & take max batch_size 2
        max_batch_size = 2
        sequence_length = input_ids.shape[-1] // 2
        input_ids = input_ids[:max_batch_size, :sequence_length]

        # generate max 3 tokens
        max_length = input_ids.shape[-1] + 3
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id

        # TransfoXL has no attention mask
        if "transfoxl" in config.__class__.__name__.lower():
            attention_mask = None
        else:
            attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:max_batch_size, :sequence_length]

        return config, input_ids, attention_mask, max_length

    @staticmethod
    def _get_logits_processor_and_kwargs(
        input_length,
        eos_token_id,
        forced_bos_token_id=None,
        forced_eos_token_id=None,
        max_length=None,
        diversity_penalty=None,
    ):
        process_kwargs = {
            "min_length": input_length + 1 if max_length is None else max_length - 1,
            "bad_words_ids": [[1, 0]],
            "no_repeat_ngram_size": 2,
            "repetition_penalty": 1.2,
        }
        logits_processor = LogitsProcessorList(
            (
                [
                    HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2),
                ]
                if diversity_penalty is not None
                else []
            )
            + (
                [
                    MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id),
                ]
                if eos_token_id is not None
                else []
            )
            + (
                [
ForcedBOSTokenLogitsProcessor(forced_bos_token_id), ] if forced_bos_token_id is not None else [] ) + ( [ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)] if forced_eos_token_id is not None else [] ) + [ NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id), NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"]), RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"]), ] ) return process_kwargs, logits_processor @staticmethod def _get_warper_and_kwargs(num_beams): warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7} logits_warper = LogitsProcessorList( [ TemperatureLogitsWarper(warp_kwargs["temperature"]), TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), ] ) return warp_kwargs, logits_warper @staticmethod def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, "num_beam_groups": 2, # one beam per group "diversity_penalty": 2.0, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=beam_kwargs["num_beam_groups"], ) return beam_kwargs, beam_scorer @staticmethod def _get_constrained_beam_scorer_and_kwargs(batch_size, max_length, constraints, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": num_return_sequences * 4, "num_return_sequences": num_return_sequences, } beam_scorer = ConstrainedBeamSearchScorer( batch_size=batch_size, constraints=constraints, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id() attention_mask = None return encoder_outputs, input_ids, attention_mask def _greedy_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = 
self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_greedy = model.greedy_search( input_ids, max_length=max_length, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_greedy, output_generate def _sample_generate( self, model, input_ids, attention_mask, max_length, num_return_sequences, logits_processor, logits_warper, logits_warper_kwargs, process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, num_beams=1, max_length=max_length, num_return_sequences=num_return_sequences, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_warper_kwargs, **process_kwargs, **model_kwargs, ) torch.manual_seed(0) kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0) # prevent flaky generation test failures logits_processor.append(InfNanRemoveLogitsProcessor()) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_sample = model.sample( input_ids.repeat_interleave(num_return_sequences, dim=0), max_length=max_length, logits_processor=logits_processor, logits_warper=logits_warper, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_sample, output_generate def _beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = 
model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_search = model.beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_search def _beam_sample_generate( self, model, input_ids, attention_mask, max_length, num_return_sequences, beam_scorer, beam_kwargs, logits_warper, logits_warper_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_warper_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams * num_return_sequences` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams * num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0) # prevent flaky generation test failures logits_processor = LogitsProcessorList() logits_processor.append(InfNanRemoveLogitsProcessor()) torch.manual_seed(0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_sample = model.beam_sample( input_ids.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0), beam_scorer, max_length=max_length, logits_warper=logits_warper, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_sample def _group_beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, 
output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.group_beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _constrained_beam_search_generate( self, model, input_ids, attention_mask, max_length, constrained_beam_scorer, constraints, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, constraints=constraints, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=constrained_beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.constrained_beam_search( input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0), constrained_beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _contrastive_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): 
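        # Runs the same inputs through `generate()` (with contrastive-search kwargs) and through
        # `contrastive_search()` directly, so callers can compare the two code paths.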
contrastive_search_kwargs = { "penalty_alpha": 0.6, "top_k": 5, } if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, **contrastive_search_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)]) output_contrastive = model.contrastive_search( input_ids, stopping_criteria=stopping_criteria, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, **contrastive_search_kwargs, ) return output_contrastive, output_generate def test_greedy_generate(self): # check `generate()` and `greedy_search()` are equal for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_greedy.tolist(), output_generate.tolist()) def test_greedy_generate_dict_outputs(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput) self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput) else: self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput) self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config) def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() if not hasattr(config, "use_cache"): # only relevant if model has 
"use_cache" return config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) # check `generate()` and `sample()` are equal output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=1, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) # check `generate()` and `sample()` yield equal results for `num_return_sequences` output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=3, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) def test_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=2, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_sample, SampleEncoderDecoderOutput) self.assertIsInstance(output_generate, SampleEncoderDecoderOutput) else: self.assertIsInstance(output_sample, SampleDecoderOnlyOutput) self.assertIsInstance(output_generate, SampleDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist()) for output in (output_sample, 
                output_generate):
                self._check_outputs(output, input_ids, model.config, num_return_sequences=2)

    def test_beam_search_generate(self):
        for model_class in self.all_generative_model_classes:
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated which could lead to flaky circle ci
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
            config.eos_token_id = None
            config.forced_eos_token_id = None

            model = model_class(config).to(torch_device).eval()
            if model.config.is_encoder_decoder:
                max_length = 4

            logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
                input_ids.shape[-1],
                config.eos_token_id,
                config.forced_bos_token_id,
                config.forced_eos_token_id,
                max_length,
            )
            beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)

            # check `generate()` and `beam_search()` are equal
            output_generate, output_beam_search = self._beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_process_kwargs=logits_process_kwargs,
                logits_processor=logits_processor,
            )

            self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())

            # check `generate()` and `beam_search()` are equal for `num_return_sequences`
            num_return_sequences = 2
            if model.config.is_encoder_decoder:
                max_length = 4
            beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
                input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
            )

            output_generate, output_beam_search = self._beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_process_kwargs=logits_process_kwargs,
                logits_processor=logits_processor,
            )
            self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())

    def test_beam_search_generate_dict_output(self):
        for model_class in self.all_generative_model_classes:
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            # disable cache
            config.use_cache = False

            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated which could lead to flaky circle ci
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
            config.eos_token_id = None
            config.forced_eos_token_id = None

            model = model_class(config).to(torch_device).eval()
            if model.config.is_encoder_decoder:
                max_length = 4

            logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
                input_ids.shape[-1],
                config.eos_token_id,
                config.forced_bos_token_id,
                config.forced_eos_token_id,
                max_length,
            )
            beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
            output_generate, output_beam_search = self._beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_process_kwargs=logits_process_kwargs,
                logits_processor=logits_processor,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )

            if model.config.is_encoder_decoder:
                self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput)
                self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
            else:
self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_beam_search_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None if not hasattr(config, "use_cache"): # only relevant if model has "use_cache" return model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_beam, output_generate = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist()) for output in (output_beam, output_generate): self._check_outputs( output, input_ids, model.config, use_cache=True, num_return_sequences=beam_scorer.num_beams ) def test_beam_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) model = model_class(config).to(torch_device).eval() # check `generate()` and `beam_search()` are equal # change `num_return_sequences = 2` but not for `beam_scorer` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs( input_ids.shape[0] * num_return_sequences, max_length ) beam_kwargs["num_return_sequences"] = num_return_sequences output_generate, output_beam_sample = self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, 
num_return_sequences=num_return_sequences, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist()) def test_beam_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs( input_ids.shape[0] * num_return_sequences, max_length ) beam_kwargs["num_return_sequences"] = num_return_sequences output_beam_sample, output_generate = self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=num_return_sequences, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput) else: self.assertIsInstance(output_beam_sample, BeamSampleDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_sample, output_generate): self._check_outputs( output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams ) def test_generate_without_input_ids(self): config, _, _, max_length = self._get_input_ids_and_config() # if no bos token id => cannot generate from None if config.bos_token_id is None: return for model_class in self.all_generative_model_classes: model = model_class(config).to(torch_device) model.eval() output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True) self.assertIsNotNone(output_ids_generate) def test_group_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, 
logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) # check `generate()` and `group_beam_search()` are equal beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) # check `generate()` and `group_beam_search()` are equal for `num_return_sequences` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) def test_group_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) num_return_sequences = 1 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist()) self.assertTrue( torch.allclose( output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3 ) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] 
< 0).all().item()) for output in (output_group_beam_search, output_generate): self._check_outputs( output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams ) def test_constrained_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # check `generate()` and `constrained_beam_search()` are equal # Sample constraints if not input_ids.dtype == torch.float32: min_id = torch.min(input_ids) + 3 max_id = torch.max(input_ids) else: # otherwise this throws an error for Speech2TextModel since its inputs are floating points min_id = 3 max_id = 100 force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) # check `generate()` and `constrained_beam_search()` are equal for `num_return_sequences` # Sample constraints force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] num_return_sequences = 2 max_length = 20 beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=num_return_sequences ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) def test_constrained_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = 
model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # Sample constraints if not input_ids.dtype == torch.float32: min_id = torch.min(input_ids) + 3 max_id = torch.max(input_ids) else: # otherwise this throws an error for Speech2TextModel since its inputs are floating points min_id = 3 max_id = 100 force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_contrastive_generate(self): # check `generate()` and `contrastive_search()` are equal for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): return config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): return config.use_cache = True config.is_decoder = True # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_contrastive.tolist(), output_generate.tolist()) def test_contrastive_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): return # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. 
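            # Models without a cache are skipped rather than failed for the same reason.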
if not hasattr(config, "use_cache"): return config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_contrastive.sequences.tolist()) for output in (output_contrastive, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_generate_with_head_masking(self): """Test designed for encoder-decoder models to ensure the attention head masking is used.""" attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device) # We want to test only encoder-decoder models if not config.is_encoder_decoder: continue head_masking = { "head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device), "decoder_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), "cross_attn_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), } signature = inspect.signature(model.forward) # We want to test only models where encoder/decoder head masking is implemented if not set(head_masking.keys()) < set([*signature.parameters.keys()]): continue for attn_name, (name, mask) in zip(attention_names, head_masking.items()): out = model.generate( input_ids, attention_mask=attention_mask, num_beams=1, output_attentions=True, return_dict_in_generate=True, remove_invalid_values=True, **{name: mask}, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions if config.is_encoder_decoder: # encoder self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no use_cache, so skip here attentions = output.attentions if not use_cache else output.attentions[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_attentions_for_generate( num_sequences_in_output, attentions=attentions, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States if config.is_encoder_decoder: # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, 
output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no use_cache, so skip here hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_hidden_states_for_generate( num_sequences_in_output, hidden_states, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): tgt_len = min_length + idx if not use_cache else 1 src_len = min_length + idx expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): seq_len = min_length + idx if not use_cache else 1 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length): encoder_expected_shape = (batch_size, seq_length, config.hidden_size) self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in hidden_states], [encoder_expected_shape] * len(hidden_states), ) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. 
if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break self.assertTrue(flag) @require_torch class UtilsFunctionsTest(unittest.TestCase): # tests whether the top_k_top_p function behaves as expected def test_top_k_top_p_filtering(self): logits = torch.tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 4 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 27 -5.89478553, 1.85370467, ], # cummulative prob of 4 highest values <= 0.6 ], dtype=torch.float, device=torch_device, ) non_inf_expected_idx = torch.tensor( [[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]], dtype=torch.long, device=torch_device, ) # expected non filtered idx as noted above non_inf_expected_output = torch.tensor( [ 8.2221, 8.4321, 7.4402, 9.3845, 6.2712, 8.8275, 7.3858, 9.6770, ], # expected non filtered values as noted above dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) non_inf_output = output[output != -float("inf")].to(device=torch_device) non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device) self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12)) self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx))) # tests whether the function uses filter_value instead of default -inf def test_top_k_top_p_filtering_with_filter_value(self): logits = torch.tensor( [ [ 1, 1, 1, 0.99, # get filtered by top-p filtering 0.98, # get filtered by top-k filtering ] ], dtype=torch.float, device=torch_device, ) expected_output = torch.tensor( [[1, 1, 1, 0, 0]], dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=4, top_p=0.5, filter_value=0.0) self.assertTrue(torch.allclose(expected_output, output, atol=1e-12)) @require_torch class GenerationIntegrationTests(unittest.TestCase): @slow def test_diverse_beam_search(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood. The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People. 
"Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports. The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both.""" bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = bart_model.generate( input_ids, num_beams=4, num_return_sequences=2, num_beam_groups=4, diversity_penalty=2.0, remove_invalid_values=True, ) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the" " middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle" " name, as well as his father's first. It is the first baby for both of them.", "Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the" " first child for both. The couple announced the pregnancy in January. The name Silas is the middle" " name of Timberlake's maternal grandfather. It's also his own middle name.", ], ) def test_max_length_backward_compat_greedy(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_sample(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with torch.no_grad(): with self.assertWarns(UserWarning): bart_model.sample( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_beam_search(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = 
BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 2 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): _ = bart_model.beam_search( input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs ) def test_max_length_backward_compat_group_beam_search(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs ) def test_max_length_warning_if_different(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size stopping_criteria_max_length = 18 stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)]) # Greedy input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, stopping_criteria=stopping_criteria, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Sample with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.sample( input_ids, max_length=max_length, stopping_criteria=stopping_criteria, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Beam beam_scorer = BeamSearchScorer( 
batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.beam_search( input_ids, num_beams=num_beams, stopping_criteria=stopping_criteria, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs, ) # Grouped beam search diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, stopping_criteria=stopping_criteria, num_beams=num_beams, max_length=max_length, **model_kwargs, ) def test_beam_search_warning_if_max_length_is_passed(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) batch_size = 1 num_beams = 3 input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) input_ids = input_ids.expand(num_beams, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) # pretend decoder_input_ids correspond to first encoder input id decoder_input_ids = input_ids[:, :1] stopping_criteria_max_length = 18 stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)]) with self.assertWarns(UserWarning): beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, max_length=10, ) generated_ids = bart_model.beam_search( decoder_input_ids, num_beams=num_beams, stopping_criteria=stopping_criteria, beam_scorer=beam_scorer, **model_kwargs, ) beam_scorer_no_max_len = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) generated_ids_no_max_len = bart_model.beam_search( decoder_input_ids, num_beams=num_beams, stopping_criteria=stopping_criteria, beam_scorer=beam_scorer_no_max_len, **model_kwargs, ) # BeamSearchScorer max_length should not influence "real" max_length self.assertEqual(generated_ids.tolist(), generated_ids_no_max_len.tolist()) def test_custom_stopping_criteria_overload_error(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) stopping_criteria = StoppingCriteriaList() stopping_criteria.append(MaxLengthCriteria(max_length=42)) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32) def test_custom_stopping_criteria(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) class DummyCriteria(StoppingCriteria): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: return input_ids.shape[-1] >= 20 stopping_criteria = 
StoppingCriteriaList() stopping_criteria.append(DummyCriteria()) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape), [1, 20], ) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape), [1, 18], ) def test_stop_sequence_stopping_criteria(self): prompt = """Hello I believe in""" generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-bart") output = generator(prompt) self.assertEqual( output, [ { "generated_text": ( "Hello I believe in in in number number number number number number number number number" ) } ], ) output = generator(prompt, stop_sequence=" number") self.assertEqual(output, [{"generated_text": "Hello I believe in in in number"}]) def test_custom_logits_processor(self): bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random", min_length=1).to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) logits_processor = LogitsProcessorList() logits_processor.append(MinLengthLogitsProcessor(min_length=10, eos_token_id=0)) # it should not be allowed to both define `min_length` via config and `logits_processor` list with self.assertRaises(ValueError): bart_model.generate(input_ids, logits_processor=logits_processor) bart_model.config.min_length = None bart_model.generate(input_ids, logits_processor=logits_processor) def test_max_new_tokens_encoder_decoder(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 29]) max_new_tokens = 3 bart_model.config.max_length = 20 bart_model.config.eos_token_id = None # Encoder decoder call outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens) # 1 BOS + 3 new tokens self.assertEqual(list(outputs.shape), [1, 4]) # Decoder only call outputs = bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=max_new_tokens) # 29 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 32]) # Encoder decoder call > 20 outputs = bart_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS + 20 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. 
with self.assertRaises(ValueError): bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=10, max_length=20) def test_max_new_tokens_decoder_only_contrastive_search_t5(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" t5_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") t5_model = T5ForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-t5").to(torch_device) input_ids = t5_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 56]) max_new_tokens = 3 t5_model.config.max_length = 20 t5_model.config.eos_token_id = None # Encoder decoder call outputs = t5_model.generate(input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4) # 1 BOS + 3 new tokens self.assertEqual(list(outputs.shape), [1, 4]) # Decoder only call outputs = t5_model.generate( decoder_input_ids=input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4 ) # 56 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 59]) # Encoder decoder call > 20 outputs = t5_model.generate(max_new_tokens=max_new_tokens + 20, penalty_alpha=0.6, top_k=4) # 1 BOS + 20 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. with self.assertRaises(ValueError): t5_model.generate( decoder_input_ids=input_ids, max_new_tokens=10, max_length=20, penalty_alpha=0.6, top_k=4 ) def test_max_new_tokens_decoder_only_contrastive_search_bart(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 29]) max_new_tokens = 3 bart_model.config.max_length = 20 bart_model.config.eos_token_id = None # Encoder decoder call outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4) # 1 BOS + 3 new tokens self.assertEqual(list(outputs.shape), [1, 4]) # Decoder only call outputs = bart_model.generate( decoder_input_ids=input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4 ) # 29 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 32]) # Encoder decoder call > 20 outputs = bart_model.generate(max_new_tokens=max_new_tokens + 20, penalty_alpha=0.6, top_k=4) # 1 BOS + 20 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. 
with self.assertRaises(ValueError): bart_model.generate( decoder_input_ids=input_ids, max_new_tokens=10, max_length=20, penalty_alpha=0.6, top_k=4 ) def test_max_new_tokens_decoder_only_contrastive_search_gptj(self): article = """Justin Timberlake.""" gptj_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gptj") gptj_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gptj").to(torch_device) input_ids = gptj_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 9]) max_new_tokens = 3 gptj_model.config.max_length = 20 # call < 20 outputs = gptj_model.generate(input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4) # 9 input_ids + 3 new tokens self.assertEqual(list(outputs.shape), [1, 12]) # call > 20 outputs = gptj_model.generate(max_new_tokens=max_new_tokens + 20, penalty_alpha=0.6, top_k=4) # 1 BOS token + 23 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. with self.assertRaises(ValueError): gptj_model.generate(input_ids=input_ids, max_new_tokens=10, max_length=20, penalty_alpha=0.6, top_k=4) def test_max_new_tokens_decoder_only_contrastive_search_gpt2(self): article = """Justin Timberlake.""" gpt2_tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") gpt2_model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) input_ids = gpt2_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 9]) max_new_tokens = 3 gpt2_model.config.max_length = 20 # call < 20 outputs = gpt2_model.generate(input_ids, max_new_tokens=max_new_tokens, penalty_alpha=0.6, top_k=4) # 9 input_ids + 3 new tokens self.assertEqual(list(outputs.shape), [1, 12]) # call > 20 outputs = gpt2_model.generate(max_new_tokens=max_new_tokens + 20, penalty_alpha=0.6, top_k=4) # 1 BOS token + 23 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. with self.assertRaises(ValueError): gpt2_model.generate(input_ids=input_ids, max_new_tokens=10, max_length=20, penalty_alpha=0.6, top_k=4) def test_max_new_tokens_decoder_only(self): article = """Justin Timberlake.""" gpt2_tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") gpt2_model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) input_ids = gpt2_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 9]) max_new_tokens = 3 gpt2_model.config.max_length = 20 # call < 20 outputs = gpt2_model.generate(input_ids, max_new_tokens=max_new_tokens) # 9 input_ids + 3 new tokens self.assertEqual(list(outputs.shape), [1, 12]) # call > 20 outputs = gpt2_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS token + 23 new tokens self.assertEqual(list(outputs.shape), [1, 24]) # max_new_tokens and max_length serve the same purpose and must not be used together. 
with self.assertRaises(ValueError): gpt2_model.generate(input_ids=input_ids, max_new_tokens=10, max_length=20) def test_encoder_decoder_generate_with_inputs_embeds(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5).to( torch_device ) model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) inputs_embeds = model.get_input_embeddings()(input_ids) output_sequences = model.generate(inputs_embeds=inputs_embeds) # make sure model generated correctly until `max_length` self.assertEqual(output_sequences.shape, (1, 5)) def test_encoder_decoder_generate_attention_mask(self): articles = ["Timberlake", "Jessica Biel, welcome to parenthood among other things"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") # need extrem generation values here to force this test # to fail when `attention_mask` is not correctly treated in generate model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=50, num_beams=5, num_return_sequences=5 ).to(torch_device) model.config.eos_token_id = None input_ids = tokenizer(articles[0], return_tensors="pt").input_ids.to(torch_device) input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate(input_ids=input_ids, return_dict_in_generate=True, output_scores=True) batched_out = output_sequences_batched.sequences_scores out = output_sequences.sequences_scores diff = (batched_out[:5].sum() - out.sum()).abs() self.assertTrue(diff < 1e-4) def test_decoder_generate_with_inputs_embeds(self): article = """I need input_ids to generate""" tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=5).to(torch_device) input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) inputs_embeds = model.get_input_embeddings()(input_ids) # cannot generate from `inputs_embeds` for decoder only with self.assertRaises(ValueError): model.generate(inputs_embeds=inputs_embeds) def test_generate_input_ids_as_kwarg(self): article = """I need input_ids to generate""" tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=15).to(torch_device) input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids).cpu() output_sequences = model.generate(input_ids).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (1, 15)) def test_generate_non_nlp_input_ids_as_kwarg(self): model = ImageGPTForCausalImageModeling.from_pretrained( "hf-internal-testing/tiny-random-imagegpt", max_length=10 ).to(torch_device) input_ids = ids_tensor((3, 5), vocab_size=10) output_sequences_kwargs = model.generate(input_ids=input_ids).cpu() output_sequences = model.generate(input_ids).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) 
self.assertEqual(output_sequences.shape, (3, 10)) def test_generate_input_ids_as_encoder_kwarg(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5).to( torch_device ) model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids).cpu() output_sequences = model.generate(input_ids).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (1, 5)) def test_generate_inputs_and_encoder_kwargs(self): article = """I need input_ids to generate""" tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10).to(torch_device) input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) with self.assertRaises(ValueError): model.generate(input_ids, input_ids=input_ids) def test_generate_too_many_encoder_kwargs(self): article = """I need input_ids to generate""" tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10).to(torch_device) input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) with self.assertRaises(ValueError): model.generate(input_ids=input_ids, inputs_embeds=input_ids) def test_generate_input_values_as_encoder_kwarg(self): input_values = floats_tensor((2, 250)) model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder") model = model.to(torch_device) output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu() output_sequences = model.generate(input_values, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (2, 5)) def test_generate_input_features_as_encoder_kwarg(self): input_features = floats_tensor((3, 20, 24)) model = Speech2TextForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-speech_to_text") model = model.to(torch_device) output_sequences_kwargs = model.generate(input_features=input_features, max_length=5).cpu() output_sequences = model.generate(input_features, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (3, 5)) def test_generate_pixel_values_as_encoder_kwarg(self): pixel_values = floats_tensor((2, 3, 30, 30)) model = VisionEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-vision-encoder-decoder") model = model.to(torch_device) output_sequences_kwargs = model.generate(pixel_values=pixel_values, max_length=5).cpu() output_sequences = model.generate(pixel_values, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (2, 5)) def test_generate_encoder_outputs_attention_mask(self): input_values = floats_tensor((2, 250)).to(torch_device) attention_mask = torch.ones_like(input_values) model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder") model = model.to(torch_device) encoder = 
model.get_encoder() encoder_outputs = encoder(input_values) output_sequences_no_mask = model.generate(encoder_outputs=encoder_outputs).cpu() output_sequences_with_mask = model.generate(encoder_outputs=encoder_outputs, attention_mask=attention_mask) output_sequences_with_mask = output_sequences_with_mask.cpu() self.assertListEqual(output_sequences_no_mask.tolist(), output_sequences_with_mask.tolist()) def test_transition_scores_beam_search_encoder_decoder(self): articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_beam_scores( outputs.sequences, outputs.scores, outputs.beam_indices ) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_search_encoder_decoder_with_eos(self): articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_beam_scores( outputs.sequences, outputs.scores, outputs.beam_indices ) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_search_decoder_only(self): articles = [ "Justin Timberlake", "Michael Phelps", ] tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer.pad_token = tokenizer.eos_token model = GPT2LMHeadModel.from_pretrained( "hf-internal-testing/tiny-random-gpt2", max_length=10, num_beams=4, num_return_sequences=2, pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_beam_scores( outputs.sequences, outputs.scores, outputs.beam_indices ) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_sample_encoder_decoder(self): articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = 
BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", do_sample=True, max_length=10, num_beams=4, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_beam_scores( outputs.sequences, outputs.scores, outputs.beam_indices ) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) def test_transition_scores_group_beam_search_encoder_decoder(self): articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=2, num_beam_groups=2, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_beam_scores( outputs.sequences, outputs.scores, outputs.beam_indices ) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) @slow def test_transition_scores_early_stopping(self): # This is an aggressive test that makes sure that `beam_search's` # transition scores are computed correctly for varying `num_return_sequences`, # `num_beams` and `batch_size > 1` # 2 x input_ids for "question: How are you? 
\n context: I had a long day, " input_ids = torch.tensor(2 * [[822, 10, 571, 33, 25, 58, 2625, 10, 27, 141, 3, 9, 307, 239, 6, 1]]).to( torch_device ) model = AutoModelForSeq2SeqLM.from_pretrained("t5-small").to(torch_device) result = model.generate( input_ids, max_length=10, return_dict_in_generate=True, output_scores=True, forced_eos_token_id=model.config.eos_token_id, num_beams=4, do_sample=False, num_return_sequences=3, length_penalty=0.0, ) transition_scores = model.compute_transition_beam_scores( sequences=result.sequences, scores=result.scores, beam_indices=result.beam_indices ) sum_transition_scores = torch.sum(transition_scores, dim=1) self.assertListEqual(sum_transition_scores.cpu().tolist(), result.sequences_scores.cpu().tolist()) def test_log_scores_sample_decoder_only(self): articles = ["I need input_ids to generate", "Short and"] tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer.padding_side = "left" tokenizer.pad_token = tokenizer.eos_token model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) inputs = tokenizer(articles, return_tensors="pt", padding=True).to(torch_device) result = model.generate( **inputs, max_length=15, return_dict_in_generate=True, do_sample=False, output_scores=True, ) # decoder-only starts generating from `input_ids` begin_generation = inputs.input_ids.shape[-1] gen_sequences = result.sequences[:, begin_generation:] probs = torch.stack(result.scores, dim=1).softmax(-1) gen_probs = torch.gather(probs, 2, gen_sequences[:, :, None]).squeeze(-1) expected_probs = torch.tensor([[0.0014, 0.0015], [0.0014, 0.0014]]) self.assertTrue(torch.allclose(gen_probs.cpu(), expected_probs, atol=1e-3)) def test_log_scores_sample_encoder_decoder(self): articles = ["I need input_ids to generate", "Short and"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) inputs = tokenizer(articles, return_tensors="pt", padding=True).to(torch_device) result = model.generate( **inputs, max_length=3, return_dict_in_generate=True, do_sample=False, num_beams=1, output_scores=True, ) # encoder-decoder has one decoder_start_token_id by default begin_generation = 1 gen_sequences = result.sequences[:, begin_generation:] probs = torch.stack(result.scores, dim=1).softmax(-1) gen_probs = torch.gather(probs, 2, gen_sequences[:, :, None]).squeeze(-1) expected_probs = torch.tensor([[0.0013, 1.0000], [0.0013, 1.0000]]) self.assertTrue(torch.allclose(gen_probs.cpu(), expected_probs, atol=1e-3)) @slow def test_beam_search_example_integration(self): # exactly the example provided in the docstrings of beam search, which previously # failed after directly copying from it. Refer to PR #15555 tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" 
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt bist du?"]) @slow def test_constrained_beam_search(self): model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_tokens = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids force_tokens_2 = tokenizer("big weapons", add_prefix_space=True, add_special_tokens=False).input_ids constraints = [ PhrasalConstraint(force_tokens), PhrasalConstraint(force_tokens_2), ] starting_text = ["The soldiers were not prepared and"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, max_length=30, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers were not prepared and didn't know what to do. 
They had no idea how they would react if" " the enemy attacked them, big weapons scared" ], ) @slow def test_constrained_beam_search_mixed(self): model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_phrase = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids flexible_phrases = tokenizer( ["scream", "screams", "screaming", "screamed"], add_prefix_space=True, add_special_tokens=False ).input_ids constraints = [ PhrasalConstraint(force_phrase), DisjunctiveConstraint(flexible_phrases), ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, # max_length=20, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_mixed_mixin(self): model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_word = "scared" force_flexible = ["scream", "screams", "screaming", "screamed"] force_words_ids = [ tokenizer([force_word], add_prefix_space=True, add_special_tokens=False).input_ids, tokenizer(force_flexible, add_prefix_space=True, add_special_tokens=False).input_ids, ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_example_translation_mixin(self): tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" force_words = ["sind"] input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) @slow def test_constrained_beam_search_example_integration(self): tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" 
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 5 beams num_beams = 5 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } constraint_str = "sind" constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] # instantiate beam scorer beam_scorer = ConstrainedBeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.constrained_beam_search( input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) def test_constrained_beam_search_mixin_type_checks(self): tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/t5-tiny-random") model = AutoModelForSeq2SeqLM.from_pretrained("patrickvonplaten/t5-tiny-random") encoder_input_str = "translate English to German: How old are you?" input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = tokenizer(force_words, return_tensors="pt").input_ids model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = [tokenizer(force_words, return_tensors="pt").input_ids] model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[-1]]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[[-1]]]) def test_contrastive_search_batched(self): # Tests that contrastive search works with batched inputs (i.e. 
has the same output as for non-batched inputs) articles = ["Foo", "Bar Baz"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) model.config.eos_token_id = None input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device) input_ids = tokenizer(articles[1], return_tensors="pt").input_ids.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate( input_ids=input_ids, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) batched_out = tokenizer.decode(output_sequences_batched.sequences[1], skip_special_tokens=True) out = tokenizer.decode(output_sequences.sequences[0], skip_special_tokens=True) self.assertEqual(batched_out, out) # output_sequences_batched.scores[0][1] -> 1st set of logits, 2nd sequence max_score_diff = (output_sequences_batched.scores[0][1] - output_sequences.scores[0][0]).abs().max() self.assertTrue(max_score_diff < 1e-5) def test_validate_generation_inputs(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(ValueError, "do_samples"): model.generate(input_ids, do_samples=True) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(ValueError, "foo"): fake_model_kwargs = {"foo": "bar"} model.generate(input_ids, **fake_model_kwargs)
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do?

When this block is run

```
if isinstance(target_sizes, List):
    img_h = torch.Tensor([i[0] for i in target_sizes])
    img_w = torch.Tensor([i[1] for i in target_sizes])
```

`scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
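A minimal sketch of the device mismatch described above, assuming the scale factor is later multiplied with predicted boxes that may live on a GPU. The helper name `rescale_boxes` and its signature are illustrative only, not the exact function touched by this PR; the point is simply that a tensor built from a Python list starts on CPU and must be moved to the boxes' device before the multiplication.

```python
import torch
from typing import List, Tuple


def rescale_boxes(boxes: torch.Tensor, target_sizes: List[Tuple[int, int]]) -> torch.Tensor:
    """Illustrative helper: rescale normalized boxes to absolute coordinates
    given per-image (height, width) sizes passed as a plain Python list."""
    # Tensors built from a Python list are created on CPU, no matter where `boxes` lives.
    img_h = torch.Tensor([i[0] for i in target_sizes])
    img_w = torch.Tensor([i[1] for i in target_sizes])
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
    # The fix described in the PR: move the scale factor onto the same device as the
    # predictions before multiplying, otherwise a CPU/GPU device-mismatch error is raised.
    scale_fct = scale_fct.to(boxes.device)
    return boxes * scale_fct[:, None, :]


if __name__ == "__main__":
    # Usage sketch: boxes on GPU when available, sizes as a Python list of (h, w) tuples.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    boxes = torch.rand(2, 100, 4, device=device)  # (batch, num_queries, 4)
    sizes = [(480, 640), (600, 800)]
    print(rescale_boxes(boxes, sizes).shape)
```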
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./examples/pytorch/text-classification/run_glue.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np from datasets import load_dataset import evaluate import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PretrainedConfig, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.25.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: Optional[str] = field( default=None, metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, ) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.task_name is not None: self.task_name = self.task_name.lower() if self.task_name not in task_to_keys.keys(): raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) elif self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.") else: train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) ignore_mismatched_sizes: bool = field( default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_glue", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( "glue", data_args.task_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) elif data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. 
if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." data_files["test"] = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files raw_datasets = load_dataset( "csv", data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Loading a dataset from local json files raw_datasets = load_dataset( "json", data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = raw_datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique label_list = raw_datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) # Preprocessing the raw_datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. 
non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}." "\nIgnoring the model labels as a result.", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if label_to_id is not None: model.config.label2id = label_to_id model.config.id2label = {id: label for label, id in config.label2id.items()} elif data_args.task_name is not None and not is_regression: model.config.label2id = {l: i for i, l in enumerate(label_list)} model.config.id2label = {id: label for label, id in config.label2id.items()} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and "label" in examples: result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] return result with training_args.main_process_first(desc="dataset map pre-processing"): raw_datasets = raw_datasets.map( preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test_matched" if data_args.task_name == "mnli" else "test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Get the metric function if data_args.task_name is not None: metric = evaluate.load("glue", data_args.task_name) else: metric = evaluate.load("accuracy") # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result elif is_regression: return {"mse": ((preds - p.label_ids) ** 2).mean().item()} else: return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if # we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") valid_mm_dataset = raw_datasets["validation_mismatched"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(valid_mm_dataset), data_args.max_eval_samples) valid_mm_dataset = valid_mm_dataset.select(range(max_eval_samples)) eval_datasets.append(valid_mm_dataset) combined = {} for eval_dataset, task in zip(eval_datasets, tasks): metrics = trainer.evaluate(eval_dataset=eval_dataset) max_eval_samples = ( data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) ) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) if task == "mnli-mm": metrics = {k + "_mm": v for k, v in metrics.items()} if task is not None and "mnli" in task: combined.update(metrics) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", combined if task is not None and "mnli" in task else metrics) if training_args.do_predict: logger.info("*** Predict ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] predict_datasets = [predict_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") predict_datasets.append(raw_datasets["test_mismatched"]) for predict_dataset, task in zip(predict_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. 
predict_dataset = predict_dataset.remove_columns("label") predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt") if trainer.is_world_process_zero(): with open(output_predict_file, "w") as writer: logger.info(f"***** Predict results {task} *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if data_args.task_name is not None: kwargs["language"] = "en" kwargs["dataset_tags"] = "glue" kwargs["dataset_args"] = data_args.task_name kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}" if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np from datasets import load_dataset import evaluate import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PretrainedConfig, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.25.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: Optional[str] = field( default=None, metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, ) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.task_name is not None: self.task_name = self.task_name.lower() if self.task_name not in task_to_keys.keys(): raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) elif self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.") else: train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) ignore_mismatched_sizes: bool = field( default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_glue", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( "glue", data_args.task_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) elif data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. 
if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." data_files["test"] = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files raw_datasets = load_dataset( "csv", data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Loading a dataset from local json files raw_datasets = load_dataset( "json", data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = raw_datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique label_list = raw_datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) # Preprocessing the raw_datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. 
non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}." "\nIgnoring the model labels as a result.", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if label_to_id is not None: model.config.label2id = label_to_id model.config.id2label = {id: label for label, id in config.label2id.items()} elif data_args.task_name is not None and not is_regression: model.config.label2id = {l: i for i, l in enumerate(label_list)} model.config.id2label = {id: label for label, id in config.label2id.items()} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and "label" in examples: result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] return result with training_args.main_process_first(desc="dataset map pre-processing"): raw_datasets = raw_datasets.map( preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test_matched" if data_args.task_name == "mnli" else "test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Get the metric function if data_args.task_name is not None: metric = evaluate.load("glue", data_args.task_name) else: metric = evaluate.load("accuracy") # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result elif is_regression: return {"mse": ((preds - p.label_ids) ** 2).mean().item()} else: return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if # we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") valid_mm_dataset = raw_datasets["validation_mismatched"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(valid_mm_dataset), data_args.max_eval_samples) valid_mm_dataset = valid_mm_dataset.select(range(max_eval_samples)) eval_datasets.append(valid_mm_dataset) combined = {} for eval_dataset, task in zip(eval_datasets, tasks): metrics = trainer.evaluate(eval_dataset=eval_dataset) max_eval_samples = ( data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) ) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) if task == "mnli-mm": metrics = {k + "_mm": v for k, v in metrics.items()} if task is not None and "mnli" in task: combined.update(metrics) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", combined if task is not None and "mnli" in task else metrics) if training_args.do_predict: logger.info("*** Predict ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] predict_datasets = [predict_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") predict_datasets.append(raw_datasets["test_mismatched"]) for predict_dataset, task in zip(predict_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. 
predict_dataset = predict_dataset.remove_columns("label") predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt") if trainer.is_world_process_zero(): with open(output_predict_file, "w") as writer: logger.info(f"***** Predict results {task} *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if data_args.task_name is not None: kwargs["language"] = "en" kwargs["dataset_tags"] = "glue" kwargs["dataset_args"] = data_args.task_name kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}" if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do?

When this block is run

```
if isinstance(target_sizes, List):
    img_h = torch.Tensor([i[0] for i in target_sizes])
    img_w = torch.Tensor([i[1] for i in target_sizes])
```

`scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
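As a reading aid only, here is a minimal, hedged sketch of the kind of fix the description above refers to: a `scale_fct` built from a plain Python list starts on CPU, and moving it to the device of the predictions avoids the mismatch. The helper name `rescale_boxes`, its signature, and the surrounding code are illustrative assumptions, not the actual diff applied in this PR.

```
# Hedged sketch, not the actual PR diff: helper name and surrounding code are assumptions.
from typing import List, Tuple, Union

import torch


def rescale_boxes(out_bbox: torch.Tensor, target_sizes: Union[torch.Tensor, List[Tuple[int, int]]]) -> torch.Tensor:
    """Rescale normalized boxes to absolute pixel coordinates per image."""
    if isinstance(target_sizes, list):
        # Tensors built from a plain Python list always live on CPU.
        img_h = torch.Tensor([size[0] for size in target_sizes])
        img_w = torch.Tensor([size[1] for size in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)

    # Without `.to(out_bbox.device)`, multiplying GPU predictions by a CPU
    # `scale_fct` raises a device mismatch error.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(out_bbox.device)
    return out_bbox * scale_fct[:, None, :]


if __name__ == "__main__":
    boxes = torch.rand(2, 5, 4)  # (batch_size, num_queries, 4); on GPU in real use
    print(rescale_boxes(boxes, [(480, 640), (600, 800)]).shape)  # torch.Size([2, 5, 4])
```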
./src/transformers/models/tapas/modeling_tf_tapas.py
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 TAPAS model.""" import enum import math from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPooling, TFMaskedLMOutput, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_tensorflow_probability_available, logging, replace_return_docstrings, requires_backends, ) from .configuration_tapas import TapasConfig logger = logging.get_logger(__name__) # soft dependency if is_tensorflow_probability_available(): try: import tensorflow_probability as tfp # On the first call, check whether a compatible version of TensorFlow is installed # TensorFlow Probability depends on a recent stable release of TensorFlow n = tfp.distributions.Normal(loc=0.0, scale=1.0) except ImportError: logger.error( "TAPAS models are not usable since `tensorflow_probability` can't be loaded." "It seems you have `tensorflow_probability` installed with the wrong tensorflow version." "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability." ) _CONFIG_FOR_DOC = "TapasConfig" _TOKENIZER_FOR_DOC = "TapasTokenizer" _CHECKPOINT_FOR_DOC = "google/tapas-base" TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = [ # large models "google/tapas-large", "google/tapas-large-finetuned-sqa", "google/tapas-large-finetuned-wtq", "google/tapas-large-finetuned-wikisql-supervised", "google/tapas-large-finetuned-tabfact", # base models "google/tapas-base", "google/tapas-base-finetuned-sqa", "google/tapas-base-finetuned-wtq", "google/tapas-base-finetuned-wikisql-supervised", "google/tapas-base-finetuned-tabfact", # small models "google/tapas-small", "google/tapas-small-finetuned-sqa", "google/tapas-small-finetuned-wtq", "google/tapas-small-finetuned-wikisql-supervised", "google/tapas-small-finetuned-tabfact", # mini models "google/tapas-mini", "google/tapas-mini-finetuned-sqa", "google/tapas-mini-finetuned-wtq", "google/tapas-mini-finetuned-wikisql-supervised", "google/tapas-mini-finetuned-tabfact", # tiny models "google/tapas-tiny", "google/tapas-tiny-finetuned-sqa", "google/tapas-tiny-finetuned-wtq", "google/tapas-tiny-finetuned-wikisql-supervised", "google/tapas-tiny-finetuned-tabfact", # See all TAPAS models at https://huggingface.co/models?filter=tapas ] EPSILON_ZERO_DIVISION = 1e-10 CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0 @dataclass class TFTableQuestionAnsweringOutput(ModelOutput): """ Output type of [`TFTapasForQuestionAnswering`]. 
Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)): Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the semi-supervised regression loss and (optionally) supervised loss for aggregations. logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Prediction scores of the cell selection head, for every token. logits_aggregation (`tf.Tensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`): Prediction scores of the aggregation head, for every aggregation operator. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None logits: tf.Tensor = None logits_aggregation: Optional[tf.Tensor] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None class TFTapasEmbeddings(tf.keras.layers.Layer): """ Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of additional token type embeddings to encode tabular structure. """ def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.type_vocab_sizes = config.type_vocab_sizes self.number_of_token_type_embeddings = len(config.type_vocab_sizes) self.reset_position_index_per_cell = config.reset_position_index_per_cell self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape: tf.TensorShape): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) for i, type_vocab_size in enumerate(self.type_vocab_sizes): with tf.name_scope(f"token_type_embeddings_{i}"): setattr( self, f"token_type_embeddings_{i}", self.add_weight( name="embeddings", shape=[type_vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ), ) super().build(input_shape) def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. 
""" assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] seq_length = input_shape[1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape + [self.number_of_token_type_embeddings], value=0) if position_ids is None: # create absolute position embeddings position_ids = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0) position_ids = tf.broadcast_to(position_ids, shape=input_shape) # when self.config.reset_position_index_per_cell is set to True, create relative position embeddings if self.reset_position_index_per_cell: # shape (batch_size, seq_len) col_index = IndexMap(token_type_ids[:, :, 1], self.type_vocab_sizes[1], batch_dims=1) # shape (batch_size, seq_len) row_index = IndexMap(token_type_ids[:, :, 2], self.type_vocab_sizes[2], batch_dims=1) # shape (batch_size, seq_len) full_index = ProductIndexMap(col_index, row_index) # shape (max_rows * max_columns,). First absolute position for every cell first_position_per_segment = reduce_min(position_ids, full_index)[0] # ? shape (batch_size, seq_len). First absolute position of the cell for every token first_position = gather(first_position_per_segment, full_index) # shape (1, seq_len) position = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0) position_ids = tf.math.minimum(self.max_position_embeddings - 1, position - first_position) if input_ids is not None: inputs_embeds = tf.gather(params=self.weight, indices=input_ids) position_embeddings = tf.gather(self.position_embeddings, indices=position_ids) final_embeddings = inputs_embeds + position_embeddings for i in range(self.number_of_token_type_embeddings): name = f"token_type_embeddings_{i}" final_embeddings += tf.gather(params=getattr(self, name), indices=token_type_ids[:, :, i]) final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Tapas class TFTapasSelfAttention(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, 
self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFTapasModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Tapas class TFTapasSelfOutput(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Tapas class TFTapasAttention(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFTapasSelfAttention(config, name="self") self.dense_output = TFTapasSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: self_outputs = self.self_attention( hidden_states=input_tensor, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) # add attentions (possibly with past_key_value) if we output them outputs = (attention_output,) + self_outputs[1:] return outputs # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Tapas class TFTapasIntermediate(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Tapas class TFTapasOutput(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): 
super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Tapas class TFTapasLayer(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.attention = TFTapasAttention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TFTapasAttention(config, name="crossattention") self.intermediate = TFTapasIntermediate(config, name="intermediate") self.bert_output = TFTapasOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: Optional[tf.Tensor], encoder_attention_mask: Optional[tf.Tensor], past_key_value: Optional[Tuple[tf.Tensor]], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = 
self.intermediate(hidden_states=attention_output) layer_output = self.bert_output( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + outputs # add attentions if we output them # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Tapas class TFTapasEncoder(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFTapasLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: Optional[tf.Tensor], encoder_attention_mask: Optional[tf.Tensor], past_key_values: Optional[Tuple[Tuple[tf.Tensor]]], use_cache: Optional[bool], output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Tapas class TFTapasPooler(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
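# Illustrative sketch (not part of the original file): "pooling" here just means
# projecting the hidden state of the first token through a tanh-activated dense
# layer. Standalone example; the helper name and sizes are made up.
def _sketch_first_token_pooling():
    import tensorflow as tf

    hidden_states = tf.random.normal((2, 5, 16))      # (batch, seq_len, hidden)
    dense = tf.keras.layers.Dense(16, activation="tanh")
    first_token = hidden_states[:, 0]                 # (batch, hidden) -- first token only
    return dense(first_token)                         # (batch, hidden)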
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Tapas class TFTapasPredictionHeadTransform(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Tapas class TFTapasLMPredictionHead(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.hidden_size = config.hidden_size self.transform = TFTapasPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.input_embeddings = input_embeddings def build(self, input_shape: tf.TensorShape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def get_output_embeddings(self) -> tf.keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Tapas class TFTapasMLMHead(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFTapasLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores @keras_serializable class TFTapasMainLayer(tf.keras.layers.Layer): config_class = TapasConfig def __init__(self, config: TapasConfig, add_pooling_layer: bool = True, **kwargs): requires_backends(self, "tensorflow_probability") super().__init__(**kwargs) self.config = config self.embeddings = TFTapasEmbeddings(config, name="embeddings") self.encoder = TFTapasEncoder(config, name="encoder") self.pooler = 
TFTapasPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self) -> tf.keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape + [len(self.config.type_vocab_sizes)], value=0) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
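# Illustrative sketch (not part of the original file): the 2D padding mask
# (1 = keep, 0 = pad) is broadcast to (batch, 1, 1, seq_len) and turned into an
# additive bias, exactly as the code right below does. Standalone example with
# made-up values; the helper name is an assumption.
def _sketch_extended_attention_mask():
    import tensorflow as tf

    attention_mask = tf.constant([[1, 1, 1, 0]], dtype=tf.float32)  # (batch, seq_len)
    extended = tf.reshape(attention_mask, (1, 1, 1, 4))             # broadcastable over heads/queries
    additive = (1.0 - extended) * -10000.0                          # 0.0 for kept tokens, -1e4 for padding
    return additive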
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TFTapasPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = TapasConfig base_model_prefix = "tapas" @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.float32, name="attention_mask"), "token_type_ids": tf.TensorSpec((None, None, None), tf.int64, name="token_type_ids"), } ] ) def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) TAPAS_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`TapasConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TAPAS_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`TapasTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0}, 7)`, *optional*): Token indices that encode tabular structure. Indices can be obtained using [`TapasTokenizer`]. See this class for more info. [What are token type IDs?](../glossary#token-type-ids) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. If `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be used. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare Tapas Model transformer outputting raw hidden-states without any specific head on top.", TAPAS_START_DOCSTRING, ) class TFTapasModel(TFTapasPreTrainedModel): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.tapas = TFTapasMainLayer(config, name="tapas") @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: r""" Returns: Examples: ```python >>> from transformers import TapasTokenizer, TapasModel >>> import pandas as pd >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base") >>> model = TapasModel.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... 
} >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling: hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutputWithPooling( last_hidden_state=output.last_hidden_state, pooler_output=output.pooler_output, hidden_states=hidden_states, attentions=attentions, ) @add_start_docstrings("""Tapas Model with a `language modeling` head on top.""", TAPAS_START_DOCSTRING) class TFTapasForMaskedLM(TFTapasPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if config.is_decoder: logger.warning( "If you want to use `TFTapasForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.tapas = TFTapasMainLayer(config, add_pooling_layer=False, name="tapas") self.lm_head = TFTapasMLMHead(config, input_embeddings=self.tapas.embeddings, name="cls") def get_lm_head(self) -> tf.keras.layers.Layer: return self.lm_head.predictions @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import TapasTokenizer, TapasForMaskedLM >>> import pandas as pd >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base") >>> model = TapasForMaskedLM.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> inputs = tokenizer( ... 
table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="tf" ... ) >>> labels = tokenizer( ... table=table, queries="How many movies has George Clooney played in?", return_tensors="tf" ... )["input_ids"] >>> outputs = model(**inputs, labels=labels) >>> logits = outputs.logits ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput: hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFMaskedLMOutput(logits=output.logits, hidden_states=hidden_states, attentions=attentions) class TFTapasComputeTokenLogits(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.temperature = config.temperature # cell selection heads with tf.name_scope("output"): self.output_weights = self.add_weight( name="output_weights", shape=(config.hidden_size,), dtype=tf.float32, trainable=True, initializer=tf.zeros_initializer() if config.init_cell_selection_weights_to_zero else tf.keras.initializers.TruncatedNormal(stddev=config.initializer_range), ) self.output_bias = self.add_weight( name="output_bias", shape=(), trainable=True, initializer=tf.zeros_initializer() ) def call(self, sequence_output: tf.Tensor) -> tf.Tensor: """ Computes logits per token Args: sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. Returns: logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Logits per token. """ logits = (tf.einsum("bsj,j->bs", sequence_output, self.output_weights) + self.output_bias) / self.temperature return logits class TFTapasComputeColumnLogits(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) with tf.name_scope("column_output"): self.column_output_weights = self.add_weight( name="column_output_weights", shape=[config.hidden_size], dtype=tf.float32, trainable=True, initializer=tf.zeros_initializer() if config.init_cell_selection_weights_to_zero else tf.keras.initializers.TruncatedNormal(stddev=config.initializer_range), ) self.column_output_bias = self.add_weight( name="column_output_bias", shape=(), trainable=True, initializer=tf.zeros_initializer() ) def call(self, sequence_output, cell_index, cell_mask, allow_empty_column_selection) -> tf.Tensor: """ Computes the column logits. Args: sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. cell_index (`ProductIndexMap`): Index that groups tokens into cells. 
cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). allow_empty_column_selection (`bool`): Whether to allow not to select any column Returns: column_logits (`tf.Tensor`of shape `(batch_size, max_num_cols)`): Tensor containing the column logits for every example in the batch. """ # First, compute the token logits (batch_size, seq_len) - without temperature token_logits = tf.einsum("bsj,j->bs", sequence_output, self.column_output_weights) + self.column_output_bias # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows) cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index) # Finally, average the logits per column (batch_size, max_num_cols) column_index = cell_index.project_inner(cell_logits_index) column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index) cell_count, _ = reduce_sum(cell_mask, column_index) column_logits /= cell_count + EPSILON_ZERO_DIVISION # Mask columns that do not appear in the example. is_padding = tf.logical_and(cell_count < 0.5, tf.not_equal(out_index.indices, 0)) column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(is_padding, tf.float32) if not allow_empty_column_selection: column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(tf.equal(out_index.indices, 0), tf.float32) return column_logits @add_start_docstrings( """ Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for SQA, WTQ or WikiSQL-supervised tasks. """, TAPAS_START_DOCSTRING, ) class TFTapasForQuestionAnswering(TFTapasPreTrainedModel): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) # base model self.tapas = TFTapasMainLayer(config, name="tapas") # dropout self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.compute_token_logits = TFTapasComputeTokenLogits(config, name="compute_token_logits") self.compute_column_logits = TFTapasComputeColumnLogits(config, name="compute_column_logits") if config.num_aggregation_labels > 0: self.aggregation_classifier = tf.keras.layers.Dense( config.num_aggregation_labels, kernel_initializer=get_initializer(config.initializer_range), name="aggregation_classifier", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFTableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, table_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, aggregation_labels: Optional[Union[np.ndarray, tf.Tensor]] = None, float_answer: Optional[Union[np.ndarray, tf.Tensor]] = None, numeric_values: Optional[Union[np.ndarray, tf.Tensor]] = None, numeric_values_scale: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> 
Union[TFTableQuestionAnsweringOutput, Tuple[tf.Tensor]]: r""" table_mask (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and padding are 0. labels (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the answer appearing in the table. Can be obtained using [`TapasTokenizer`]. - 1 for tokens that are **part of the answer**, - 0 for tokens that are **not part of the answer**. aggregation_labels (`tf.Tensor` of shape `(batch_size, )`, *optional*): Aggregation function index for every example in the batch for computing the aggregation loss. Indices should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for aggregation (WikiSQL-supervised). float_answer (`tf.Tensor` of shape `(batch_size, )`, *optional*): Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss. numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using [`TapasTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Scale of the numeric values of every token. Can be obtained using [`TapasTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. Returns: Examples: ```python >>> from transformers import TapasTokenizer, TapasForQuestionAnswering >>> import pandas as pd >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") >>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> logits_aggregation = outputs.logits_aggregation ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] pooled_output = outputs[1] sequence_output = self.dropout(sequence_output) if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] # Construct indices for the table. if token_type_ids is None: token_type_ids = tf.fill(input_shape + [len(self.config.type_vocab_sizes)], 0) token_types = [ "segment_ids", "column_ids", "row_ids", "prev_labels", "column_ranks", "inv_column_ranks", "numeric_relations", ] row_ids = token_type_ids[:, :, token_types.index("row_ids")] column_ids = token_type_ids[:, :, token_types.index("column_ids")] # Construct indices for the table. 
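# Illustrative sketch (not part of the original file): token_type_ids carries 7
# channels per token (segment, column, row, ...); slicing out the row and column
# channels gives per-token table coordinates. Standalone example with an invented
# 4-token sequence; the helper name and values are assumptions.
def _sketch_row_and_column_ids():
    import tensorflow as tf

    # (batch=1, seq_len=4, 7 type channels)
    token_type_ids = tf.constant([[[0, 0, 0, 0, 0, 0, 0],    # question token
                                   [0, 1, 1, 0, 0, 0, 0],    # cell (row 1, col 1)
                                   [0, 2, 1, 0, 0, 0, 0],    # cell (row 1, col 2)
                                   [0, 1, 2, 0, 0, 0, 0]]])  # cell (row 2, col 1)
    token_types = ["segment_ids", "column_ids", "row_ids", "prev_labels",
                   "column_ranks", "inv_column_ranks", "numeric_relations"]
    row_ids = token_type_ids[:, :, token_types.index("row_ids")]
    column_ids = token_type_ids[:, :, token_types.index("column_ids")]
    return row_ids, column_ids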
row_index = IndexMap( indices=tf.minimum(tf.cast(row_ids, tf.int32), self.config.max_num_rows - 1), num_segments=self.config.max_num_rows, batch_dims=1, ) col_index = IndexMap( indices=tf.minimum(tf.cast(column_ids, tf.int32), self.config.max_num_columns - 1), num_segments=self.config.max_num_columns, batch_dims=1, ) cell_index = ProductIndexMap(row_index, col_index) # Masks. input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)[:-1] if attention_mask is None: attention_mask = tf.ones(input_shape) # Table cells only, without question tokens and table headers. if table_mask is None: table_mask = tf.where(row_ids > 0, tf.ones_like(row_ids), tf.zeros_like(row_ids)) # <float32>[batch_size, seq_length] input_mask_float = tf.cast(attention_mask, tf.float32) table_mask_float = tf.cast(table_mask, tf.float32) # Mask for cells that exist in the table (i.e. that are not padding). cell_mask, _ = reduce_mean(input_mask_float, cell_index) # Compute logits per token. These are used to select individual cells. logits = self.compute_token_logits(sequence_output) # Compute logits per column. These are used to select a column. column_logits = None if self.config.select_one_column: column_logits = self.compute_column_logits( sequence_output, cell_index, cell_mask, self.config.allow_empty_column_selection ) # Aggregate logits. logits_aggregation = None if self.config.num_aggregation_labels > 0: logits_aggregation = self.aggregation_classifier(pooled_output) # Total loss calculation total_loss = tf.zeros(shape=(1,), dtype=tf.float32) calculate_loss = False if labels is not None: calculate_loss = True is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision # Semi-supervised cell selection in case of no aggregation: # If the answer (the denotation) appears directly in the table we might # select the answer without applying any aggregation function. There are # some ambiguous cases, see utils._calculate_aggregate_mask for more info. # `aggregate_mask` is 1 for examples where we chose to aggregate and 0 # for examples where we chose to select the answer directly. # `labels` encodes the positions of the answer appearing in the table. if is_supervised: aggregate_mask = None else: if float_answer is not None: assert ( shape_list(labels)[0] == shape_list(float_answer)[0] ), "Make sure the answers are a FloatTensor of shape (batch_size,)" # <float32>[batch_size] aggregate_mask = _calculate_aggregate_mask( float_answer, pooled_output, self.config.cell_selection_preference, labels, self.aggregation_classifier, ) else: aggregate_mask = None raise ValueError("You have to specify float answers in order to calculate the aggregate mask") # Cell selection log-likelihood if self.config.average_logits_per_cell: logits_per_cell, _ = reduce_mean(logits, cell_index) logits = gather(logits_per_cell, cell_index) dist_per_token = tfp.distributions.Bernoulli(logits=logits) # Compute cell selection loss per example. 
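# Illustrative sketch (not part of the original file): averaging logits per cell and
# scattering the averages back to tokens (reduce_mean + gather above) boils down to a
# segment mean followed by a gather. Standalone example with made-up numbers, two
# cells over five tokens; the helper name is an assumption.
def _sketch_average_logits_per_cell():
    import tensorflow as tf

    token_logits = tf.constant([1.0, 3.0, 2.0, 4.0, 6.0])
    cell_ids = tf.constant([0, 0, 1, 1, 1])  # which cell each token belongs to
    per_cell = tf.math.unsorted_segment_mean(token_logits, cell_ids, num_segments=2)  # [2.0, 4.0]
    return tf.gather(per_cell, cell_ids)  # [2.0, 2.0, 4.0, 4.0, 4.0]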
selection_loss_per_example = None if not self.config.select_one_column: weight = tf.where( labels == 0, tf.ones_like(labels, dtype=tf.float32), self.config.positive_label_weight * tf.ones_like(labels, dtype=tf.float32), ) selection_loss_per_token = -dist_per_token.log_prob(labels) * weight selection_loss_per_example = tf.reduce_sum(selection_loss_per_token * input_mask_float, axis=1) / ( tf.reduce_sum(input_mask_float, axis=1) + EPSILON_ZERO_DIVISION ) else: selection_loss_per_example, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) dist_per_token = tfp.distributions.Bernoulli(logits=logits) # Supervised cell selection if self.config.disable_per_token_loss: pass elif is_supervised: total_loss += tf.reduce_mean(selection_loss_per_example) else: # For the not supervised case, do not assign loss for cell selection total_loss += tf.reduce_mean(selection_loss_per_example * (1.0 - aggregate_mask)) # Semi-supervised regression loss and supervised loss for aggregations if self.config.num_aggregation_labels > 0: if is_supervised: # Note that `aggregate_mask` is None if the setting is supervised. if aggregation_labels is not None: assert ( shape_list(labels)[0] == shape_list(aggregation_labels)[0] ), "Make sure the aggregation labels are a LongTensor of shape (batch_size,)" per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) else: raise ValueError( "You have to specify aggregation labels in order to calculate the aggregation loss" ) else: aggregation_labels = tf.zeros(shape_list(labels)[0], dtype=tf.int32) per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) if self.config.use_answer_as_supervision: if numeric_values is not None and numeric_values_scale is not None: assert shape_list(numeric_values) == shape_list(numeric_values_scale) # Add regression loss for numeric answers which require aggregation. answer_loss, large_answer_loss_mask = _calculate_regression_loss( float_answer, aggregate_mask, dist_per_token, numeric_values, numeric_values_scale, table_mask_float, logits_aggregation, self.config, ) per_example_additional_loss += answer_loss # Zero loss for examples with answer_loss > cutoff. 
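# Illustrative sketch (not part of the original file): the per-token selection loss
# above is a weighted Bernoulli negative log-likelihood averaged over non-padding
# tokens. Standalone example with invented logits, labels and weight (requires
# tensorflow_probability); the helper name and numbers are assumptions.
def _sketch_token_selection_loss():
    import tensorflow as tf
    import tensorflow_probability as tfp

    logits = tf.constant([[2.0, -1.0, 0.5]])   # (batch=1, seq_len=3)
    labels = tf.constant([[1.0, 0.0, 1.0]])    # 1 = token is part of the answer
    mask = tf.constant([[1.0, 1.0, 1.0]])      # attention mask as float
    positive_label_weight = 10.0               # made-up weight for positive labels
    weight = tf.where(labels == 0.0, tf.ones_like(mask), positive_label_weight * tf.ones_like(mask))
    dist = tfp.distributions.Bernoulli(logits=logits)
    per_token = -dist.log_prob(labels) * weight
    return tf.reduce_sum(per_token * mask, axis=1) / (tf.reduce_sum(mask, axis=1) + 1e-10)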
per_example_additional_loss *= large_answer_loss_mask else: raise ValueError( "You have to specify numeric values and numeric values scale in order to calculate the" " regression loss" ) total_loss += tf.reduce_mean(per_example_additional_loss) else: # if no label ids are provided, set them to zeros in order to properly compute logits labels = tf.zeros_like(logits) _, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) if not return_dict: output = (logits, logits_aggregation) + outputs[2:] return ((total_loss,) + output) if calculate_loss else output return TFTableQuestionAnsweringOutput( loss=total_loss if calculate_loss else None, logits=logits, logits_aggregation=logits_aggregation, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output: TFTableQuestionAnsweringOutput) -> TFTableQuestionAnsweringOutput: hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFTableQuestionAnsweringOutput( logits=output.logits, logits_aggregation=output.logits_aggregation, hidden_states=hidden_states, attentions=attentions, ) @add_start_docstrings( """ Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table entailment tasks, such as TabFact (Chen et al., 2020). """, TAPAS_START_DOCSTRING, ) class TFTapasForSequenceClassification(TFTapasPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.tapas = TFTapasMainLayer(config, name="tapas") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called "classification_class_index" in the original implementation. 
Returns: Examples: ```python >>> from transformers import TapasTokenizer, TapasForSequenceClassification >>> import tensorflow as tf >>> import pandas as pd >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact") >>> model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = [ ... "There is only one actor who is 45 years old", ... "There are 3 actors which played in more than 60 movies", ... ] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") >>> labels = tf.convert_to_tensor([1, 0]) # 1 means entailed, 0 means refuted >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput: hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hidden_states, attentions=attentions) """ TAPAS utilities.""" class AverageApproximationFunction(str, enum.Enum): RATIO = "ratio" FIRST_ORDER = "first_order" SECOND_ORDER = "second_order" # Beginning of everything related to segmented tensors class IndexMap(object): """Index grouping entries within a tensor.""" def __init__(self, indices, num_segments, batch_dims=0): """ Creates an index. Args: indices: <int32> Tensor of indices, same shape as `values`. num_segments: <int32> Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same number of segments (although many segments can be empty). batch_dims: Python integer, the number of batch dimensions. The first `batch_dims` dimensions of a SegmentedTensor are treated as batch dimensions. Segments in different batch elements are always distinct even if they have the same index. """ self.indices = tf.convert_to_tensor(indices) self.num_segments = tf.convert_to_tensor(num_segments) self.batch_dims = batch_dims def batch_shape(self): return tf.shape(self.indices)[: self.batch_dims] class ProductIndexMap(IndexMap): """The product of two indices.""" def __init__(self, outer_index, inner_index): """ Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the intersection of segments i and j. 
For example if the inputs represent table cells indexed by respectively rows and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has `num_segments` equal to `outer_index.num_segements` * `inner_index.num_segments`. Args: outer_index: IndexMap. inner_index: IndexMap, must have the same shape as `outer_index`. """ if outer_index.batch_dims != inner_index.batch_dims: raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.") super(ProductIndexMap, self).__init__( indices=( inner_index.indices + outer_index.indices * tf.cast(inner_index.num_segments, inner_index.indices.dtype) ), num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims, ) self.outer_index = outer_index self.inner_index = inner_index def project_outer(self, index): """Projects an index with the same index set onto the outer components.""" return IndexMap( indices=tf.math.floordiv(index.indices, self.inner_index.num_segments), num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims, ) def project_inner(self, index): """Projects an index with the same index set onto the inner components.""" return IndexMap( indices=tf.math.floormod(index.indices, self.inner_index.num_segments), num_segments=self.inner_index.num_segments, batch_dims=index.batch_dims, ) def gather(values, index, name="segmented_gather"): """ Gathers from `values` using the index map. For each element in the domain of the index map this operation looks up a value for that index in `values`. Two elements from the same segment always get assigned the same value. Args: values: [B1, ..., Bn, num_segments, V1, ...] Tensor with segment values. index: [B1, ..., Bn, I1, ..., Ik] IndexMap. name: Name for the TensorFlow operation. Returns: [B1, ..., Bn, I1, ..., Ik, V1, ...] Tensor with the gathered values. """ return tf.gather(values, index.indices, batch_dims=index.batch_dims, name=name) def flatten(index, name="segmented_flatten"): """ Flattens a batched index map to a 1d index map. This operation relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by `num_segments` * (k - 1). The result is a tensor with `num_segments` multiplied by the number of elements in the batch. Args: index: IndexMap to flatten. name: Name for the TensorFlow operation. Returns: The flattened IndexMap. """ batch_size = tf.reduce_prod(index.batch_shape()) offset = tf.range(batch_size) * index.num_segments offset = tf.reshape(offset, index.batch_shape()) for _ in range(index.batch_dims, index.indices.shape.rank): offset = tf.expand_dims(offset, -1) indices = tf.cast(offset, index.indices.dtype) + index.indices return IndexMap(indices=tf.reshape(indices, [-1]), num_segments=index.num_segments * batch_size, batch_dims=0) def range_index_map(batch_shape, num_segments, name="range_index_map"): """ Constructs an index map equal to range(num_segments). Args: batch_shape (`tf.Tensor`): Batch shape num_segments (`int`): Number of segments name (`str`, *optional*, defaults to 'range_index_map'): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). 
""" batch_shape = tf.convert_to_tensor(batch_shape) batch_shape.shape.assert_has_rank(1) num_segments = tf.convert_to_tensor(num_segments) num_segments.shape.assert_has_rank(0) indices = tf.range(num_segments) shape = tf.concat([tf.ones_like(batch_shape, dtype=tf.int32), tf.expand_dims(num_segments, axis=0)], axis=0) indices = tf.reshape(indices, shape) multiples = tf.concat([batch_shape, [1]], axis=0) indices = tf.tile(indices, multiples) return IndexMap(indices=indices, num_segments=num_segments, batch_dims=batch_shape.shape.as_list()[0]) def _segment_reduce(values, index, segment_reduce_fn, name): """ Applies a segment reduction segment-wise. Args: values (`tf.Tensor`): Tensor with segment values. index (`IndexMap`): IndexMap. segment_reduce_fn (`str`): Name for the reduce operation. One of "sum", "mean", "max" or "min". name (`str`): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ # Flatten the batch dimensions, as segments ops do not support batching. # However if `values` has extra dimensions to the right keep them # unflattened. Segmented ops support vector-valued operations. flat_index = flatten(index) vector_shape = tf.shape(values)[index.indices.shape.rank :] flattened_shape = tf.concat([[-1], vector_shape], axis=0) flat_values = tf.reshape(values, flattened_shape) segment_means = segment_reduce_fn( data=flat_values, segment_ids=flat_index.indices, num_segments=flat_index.num_segments ) # Unflatten the values. new_shape = tf.concat([index.batch_shape(), [index.num_segments], vector_shape], axis=0) output_values = tf.reshape(segment_means, new_shape) output_index = range_index_map(index.batch_shape(), index.num_segments) return output_values, output_index def reduce_mean(values, index, name="segmented_reduce_mean"): """ Averages a tensor over its segments. Outputs 0 for empty segments. This operations computes the mean over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be a mean of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be averaged. index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments. name: Name for the TensorFlow ops. Returns: A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, tf.math.unsorted_segment_mean, name) def reduce_sum(values, index, name="segmented_reduce_sum"): """ Sums a tensor over its segments. Outputs 0 for empty segments. This operations computes the sum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be a sum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be averaged. index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments. name: Name for the TensorFlow ops. 
Returns: A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, tf.math.unsorted_segment_sum, name) def reduce_max(values, index, name="segmented_reduce_max"): """ Computes the maximum over segments. This operations computes the maximum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be an element-wise maximum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be averaged. index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments. name: Name for the TensorFlow ops. Returns: A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, tf.math.unsorted_segment_max, name) def reduce_min(values, index, name="segmented_reduce_min"): """Computes the minimum over segments.""" return _segment_reduce(values, index, tf.math.unsorted_segment_min, name) def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask): """ Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The model first predicts a column and then selects cells within that column (conditioned on the column). Cells outside the selected column are never selected. Args: token_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Tensor containing the logits per token. column_logits (`tf.Tensor` of shape `(batch_size, max_num_cols)`): Tensor containing the logits per column. labels (`tf.Tensor` of shape `(batch_size, sequence_length)`): Labels per token. cell_index (`ProductIndexMap`): Index that groups tokens into cells. col_index (`IndexMap`): Index that groups tokens into columns. cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). Returns: selection_loss_per_example (`tf.Tensor` of shape `(batch_size,)`): Loss for each example. logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): New logits which are only allowed to select cells in a single column. Logits outside of the most likely column according to *column_logits* will be set to a very low value (such that the probabilities are 0). """ # First find the column we should select. We use the column with maximum # number of selected cells. labels_per_column, _ = reduce_sum(tf.cast(labels, tf.float32), col_index) column_label = tf.argmax(labels_per_column, axis=-1, output_type=tf.int32) # Check if there are no selected cells in the column. In that case the model # should predict the special column id 0, which means "select nothing". no_cell_selected = tf.equal(tf.reduce_max(labels_per_column, axis=-1), 0) column_label = tf.where(no_cell_selected, tf.zeros_like(column_label), column_label) column_dist = tfp.distributions.Categorical(logits=column_logits) column_loss_per_example = -column_dist.log_prob(column_label) # Reduce the labels and logits to per-cell from per-token. 
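# Illustrative sketch (not part of the original file): the "target column" above is
# simply the column whose cells carry the most positive labels, with index 0 reserved
# for "select nothing". Standalone example with made-up per-column label counts; the
# helper name is an assumption.
def _sketch_pick_target_column():
    import tensorflow as tf

    # labels summed per column for one example: [no-column, col 1, col 2, col 3]
    labels_per_column = tf.constant([[0.0, 0.0, 3.0, 1.0]])
    column_label = tf.argmax(labels_per_column, axis=-1, output_type=tf.int32)  # -> [2]
    no_cell_selected = tf.equal(tf.reduce_max(labels_per_column, axis=-1), 0)   # -> [False]
    return tf.where(no_cell_selected, tf.zeros_like(column_label), column_label)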
logits_per_cell, _ = reduce_mean(token_logits, cell_index) labels_per_cell, labels_index = reduce_max(tf.cast(labels, tf.int32), cell_index) # Mask for the selected column. column_id_for_cells = cell_index.project_inner(labels_index).indices column_mask = tf.cast(tf.equal(column_id_for_cells, tf.expand_dims(column_label, axis=1)), tf.float32) # Compute the log-likelihood for cells, but only for the selected column. cell_dist = tfp.distributions.Bernoulli(logits=logits_per_cell) cell_log_prob = cell_dist.log_prob(labels_per_cell) cell_loss = -tf.reduce_sum(cell_log_prob * column_mask * cell_mask, axis=1) # We need to normalize the loss by the number of cells in the column. cell_loss /= tf.reduce_sum(column_mask * cell_mask, axis=1) + EPSILON_ZERO_DIVISION selection_loss_per_example = column_loss_per_example selection_loss_per_example += tf.where(no_cell_selected, tf.zeros_like(selection_loss_per_example), cell_loss) # Set the probs outside the selected column (selected by the *model*) # to 0. This ensures backwards compatibility with models that select # cells from multiple columns. selected_column_id = tf.argmax(column_logits, axis=-1, output_type=tf.int32) selected_column_mask = tf.cast( tf.equal(column_id_for_cells, tf.expand_dims(selected_column_id, axis=-1)), tf.float32 ) # Never select cells with the special column id 0. selected_column_mask = tf.where( tf.equal(column_id_for_cells, 0), tf.zeros_like(selected_column_mask), selected_column_mask ) logits_per_cell += CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask) logits = gather(logits_per_cell, cell_index) return selection_loss_per_example, logits def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier): """ Finds examples where the model should select cells with no aggregation. Returns a mask that determines for which examples should the model select answers directly from the table, without any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation case. The ambiguous case is when the answer is a number that also appears in the table. In this case we use the aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold for this is a hyperparameter *cell_selection_preference* Args: answer (`tf.Tensor` of shape `(batch_size, )`): Answer for every example in the batch. Nan if there is no scalar answer. pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Output of the pooler (BertPooler) on top of the encoder layer. cell_selection_preference (`float`): Preference for cell selection in ambiguous cases. labels (`tf.Tensor` of shape `(batch_size, sequence_length)`): Labels per token. aggregation_classifier (`torch.nn.Linear`): Aggregation head Returns: aggregate_mask (`tf.Tensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. """ # tf.Tensor(batch_size,) aggregate_mask_init = tf.cast(tf.logical_not(tf.math.is_nan(answer)), tf.float32) logits_aggregation = aggregation_classifier(pooled_output) dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1) # Cell selection examples according to current model. 
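    # Illustrative sketch (not executed), assuming cell_selection_preference=0.5:
    # if the probability mass on the aggregation operators (indices 1..N) is,
    # say, 0.3 for an example, the model "prefers" plain cell selection; if that
    # example also has non-empty cell supervision, its aggregate_mask entry is
    # forced to 0 below, i.e. it is trained as a pure cell selection example.
    # The 0.5 / 0.3 numbers are made up purely for illustration.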
is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference # Examples with non-empty cell selection supervision. is_cell_supervision_available = tf.reduce_sum(labels, axis=1) > 0 aggregate_mask = tf.where( tf.logical_and(is_pred_cell_selection, is_cell_supervision_available), tf.zeros_like(aggregate_mask_init, dtype=tf.float32), aggregate_mask_init, ) aggregate_mask = tf.stop_gradient(aggregate_mask) return aggregate_mask def _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ): """ Calculates aggregation loss when its type is known during training. In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation" should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting where aggregation type is always known, standard cross entropy loss is accumulated for all examples Args: logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`tf.Tensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`tf.Tensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. Returns: aggregation_loss_known (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (when its type is known during training) per example. """ if use_answer_as_supervision: # Prepare "no aggregation" targets for cell selection examples. target_aggregation = tf.zeros_like(aggregate_mask, dtype=tf.int32) else: # Use aggregation supervision as the target. target_aggregation = aggregation_labels one_hot_labels = tf.one_hot(target_aggregation, depth=num_aggregation_labels, dtype=tf.float32) log_probs = tf.nn.log_softmax(logits_aggregation, axis=-1) # <float32>[batch_size] per_example_aggregation_intermediate = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) if use_answer_as_supervision: # Accumulate loss only for examples requiring cell selection # (no aggregation). return per_example_aggregation_intermediate * (1 - aggregate_mask) else: return per_example_aggregation_intermediate def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask): """ Calculates aggregation loss in the case of answer supervision. Args: logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`tf.Tensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions Returns: aggregation_loss_unknown (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (in case of answer supervision) per example. """ dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1) # Predict some aggregation in case of an answer that needs aggregation. # This increases the probability of all aggregation functions, in a way # similar to MML, but without considering whether the function gives the # correct answer. 
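    # Illustrative sketch (not executed): for an example that needs aggregation
    # (aggregate_mask == 1), if the combined probability of all aggregation
    # operators is 0.8, the loss term below is -log(0.8) ~= 0.22; if it is only
    # 0.1, the term grows to -log(0.1) ~= 2.30. These numbers are hypothetical
    # and only show how the loss pushes probability mass away from the
    # "no aggregation" class for such examples.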
return -tf.math.log(aggregation_ops_total_mass) * aggregate_mask def _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels, aggregation_loss_weight, ): """ Calculates the aggregation loss per example. Args: logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`tf.Tensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`tf.Tensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. aggregation_loss_weight (`float`, *optional*, defaults to 1.0): Importance weight for the aggregation loss. Returns: aggregation_loss (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss per example. """ per_example_aggregation_loss = _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ) if use_answer_as_supervision: # Add aggregation loss for numeric answers that need aggregation. per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask) return aggregation_loss_weight * per_example_aggregation_loss def _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ): """ Calculates the expected result given cell and aggregation probabilities. Args: dist_per_cell (`tfp.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the hyperparameters of the model Returns: expected_result (`tf.Tensor` of shape `(batch_size,)`): The expected result per example. """ if config.use_gumbel_for_cells: gumbel_dist = tfp.distributions.RelaxedBernoulli( # The token logits where already divided by the temperature and used for # computing cell selection errors so we need to multiply it again here config.temperature, logits=dist_per_cell.logits_parameter() * config.temperature, ) scaled_probability_per_cell = gumbel_dist.sample() else: scaled_probability_per_cell = dist_per_cell.probs_parameter() # <float32>[batch_size, seq_length] scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float count_result = tf.reduce_sum(scaled_probability_per_cell, axis=1) numeric_values_masked = tf.where( tf.math.is_nan(numeric_values), tf.zeros_like(numeric_values), numeric_values ) # Mask non-numeric table values to zero. 
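    # Illustrative sketch (not executed): with hypothetical per-token selection
    # probabilities [0.9, 0.1, 0.8] and (masked) numeric values [10., 0., 5.],
    # the soft COUNT computed above is 0.9 + 0.1 + 0.8 = 1.8 and the soft SUM
    # below is 0.9 * 10 + 0.1 * 0 + 0.8 * 5 = 13.0. The expected result
    # returned at the end of this function is a probability-weighted mix of the
    # SUM / AVG / COUNT estimates, weighted by the aggregation probabilities.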
sum_result = tf.reduce_sum(scaled_probability_per_cell * numeric_values_masked, axis=1) avg_approximation = config.average_approximation_function if avg_approximation == AverageApproximationFunction.RATIO: average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION) elif avg_approximation == AverageApproximationFunction.FIRST_ORDER: # The sum of all probabilities exept that correspond to other cells ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1 average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell / ex, axis=1) elif avg_approximation == AverageApproximationFunction.SECOND_ORDER: # The sum of all probabilities exept that correspond to other cells ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1 pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell) var = tf.reduce_sum(pointwise_var, axis=1, keepdims=True) - pointwise_var multiplier = (var / tf.math.square(ex) + 1) / ex average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell * multiplier, axis=1) else: raise ValueError("Invalid average_approximation_function: %s", config.average_approximation_function) if config.use_gumbel_for_aggregation: gumbel_dist = tfp.distributions.RelaxedOneHotCategorical( config.aggregation_temperature, logits=logits_aggregation[:, 1:] ) # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = gumbel_dist.sample() else: # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = stable_softmax(logits_aggregation[:, 1:] / config.aggregation_temperature, axis=-1) all_results = tf.concat( [ tf.expand_dims(sum_result, axis=1), tf.expand_dims(average_result, axis=1), tf.expand_dims(count_result, axis=1), ], axis=1, ) expected_result = tf.reduce_sum(all_results * aggregation_op_only_probs, axis=1) return expected_result def _calculate_regression_loss( answer, aggregate_mask, dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config, ): """ Calculates the regression loss per example. Args: answer (`tf.Tensor` of shape `(batch_size,)`): Answer for every example in the batch. Nan if there is no scalar answer. aggregate_mask (`tf.Tensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. dist_per_cell (`torch.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the parameters of the model Returns: per_example_answer_loss_scaled (`tf.Tensor` of shape `(batch_size,)`): Scales answer loss for each example in the batch. large_answer_loss_mask (`tf.Tensor` of shape `(batch_size,)`): A mask which is 1 for examples for which their answer loss is larger than the answer_loss_cutoff. 
""" # float32 (batch_size,) expected_result = _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ) # <float32>[batch_size] answer_masked = tf.where(tf.math.is_nan(answer), tf.zeros_like(answer), answer) if config.use_normalized_answer_loss: normalizer = tf.stop_gradient( tf.math.maximum(tf.math.abs(expected_result), tf.math.abs(answer_masked)) + EPSILON_ZERO_DIVISION ) normalized_answer_masked = answer_masked / normalizer normalized_expected_result = expected_result / normalizer per_example_answer_loss = tf.compat.v1.losses.huber_loss( normalized_answer_masked * aggregate_mask, normalized_expected_result * aggregate_mask, delta=tf.cast(1.0, tf.float32), reduction=tf.losses.Reduction.NONE, ) else: per_example_answer_loss = tf.compat.v1.losses.huber_loss( answer_masked * aggregate_mask, expected_result * aggregate_mask, delta=tf.cast(config.huber_loss_delta, tf.float32), reduction=tf.losses.Reduction.NONE, ) if config.answer_loss_cutoff is None: large_answer_loss_mask = tf.ones_like(per_example_answer_loss, dtype=tf.float32) else: large_answer_loss_mask = tf.where( per_example_answer_loss > config.answer_loss_cutoff, tf.zeros_like(per_example_answer_loss, dtype=tf.float32), tf.ones_like(per_example_answer_loss, dtype=tf.float32), ) per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 TAPAS model.""" import enum import math from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPooling, TFMaskedLMOutput, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_tensorflow_probability_available, logging, replace_return_docstrings, requires_backends, ) from .configuration_tapas import TapasConfig logger = logging.get_logger(__name__) # soft dependency if is_tensorflow_probability_available(): try: import tensorflow_probability as tfp # On the first call, check whether a compatible version of TensorFlow is installed # TensorFlow Probability depends on a recent stable release of TensorFlow n = tfp.distributions.Normal(loc=0.0, scale=1.0) except ImportError: logger.error( "TAPAS models are not usable since `tensorflow_probability` can't be loaded." "It seems you have `tensorflow_probability` installed with the wrong tensorflow version." "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability." ) _CONFIG_FOR_DOC = "TapasConfig" _TOKENIZER_FOR_DOC = "TapasTokenizer" _CHECKPOINT_FOR_DOC = "google/tapas-base" TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = [ # large models "google/tapas-large", "google/tapas-large-finetuned-sqa", "google/tapas-large-finetuned-wtq", "google/tapas-large-finetuned-wikisql-supervised", "google/tapas-large-finetuned-tabfact", # base models "google/tapas-base", "google/tapas-base-finetuned-sqa", "google/tapas-base-finetuned-wtq", "google/tapas-base-finetuned-wikisql-supervised", "google/tapas-base-finetuned-tabfact", # small models "google/tapas-small", "google/tapas-small-finetuned-sqa", "google/tapas-small-finetuned-wtq", "google/tapas-small-finetuned-wikisql-supervised", "google/tapas-small-finetuned-tabfact", # mini models "google/tapas-mini", "google/tapas-mini-finetuned-sqa", "google/tapas-mini-finetuned-wtq", "google/tapas-mini-finetuned-wikisql-supervised", "google/tapas-mini-finetuned-tabfact", # tiny models "google/tapas-tiny", "google/tapas-tiny-finetuned-sqa", "google/tapas-tiny-finetuned-wtq", "google/tapas-tiny-finetuned-wikisql-supervised", "google/tapas-tiny-finetuned-tabfact", # See all TAPAS models at https://huggingface.co/models?filter=tapas ] EPSILON_ZERO_DIVISION = 1e-10 CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0 @dataclass class TFTableQuestionAnsweringOutput(ModelOutput): """ Output type of [`TFTapasForQuestionAnswering`]. 
Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)): Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the semi-supervised regression loss and (optionally) supervised loss for aggregations. logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Prediction scores of the cell selection head, for every token. logits_aggregation (`tf.Tensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`): Prediction scores of the aggregation head, for every aggregation operator. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None logits: tf.Tensor = None logits_aggregation: Optional[tf.Tensor] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None class TFTapasEmbeddings(tf.keras.layers.Layer): """ Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of additional token type embeddings to encode tabular structure. """ def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.type_vocab_sizes = config.type_vocab_sizes self.number_of_token_type_embeddings = len(config.type_vocab_sizes) self.reset_position_index_per_cell = config.reset_position_index_per_cell self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape: tf.TensorShape): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) for i, type_vocab_size in enumerate(self.type_vocab_sizes): with tf.name_scope(f"token_type_embeddings_{i}"): setattr( self, f"token_type_embeddings_{i}", self.add_weight( name="embeddings", shape=[type_vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ), ) super().build(input_shape) def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. 
""" assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] seq_length = input_shape[1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape + [self.number_of_token_type_embeddings], value=0) if position_ids is None: # create absolute position embeddings position_ids = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0) position_ids = tf.broadcast_to(position_ids, shape=input_shape) # when self.config.reset_position_index_per_cell is set to True, create relative position embeddings if self.reset_position_index_per_cell: # shape (batch_size, seq_len) col_index = IndexMap(token_type_ids[:, :, 1], self.type_vocab_sizes[1], batch_dims=1) # shape (batch_size, seq_len) row_index = IndexMap(token_type_ids[:, :, 2], self.type_vocab_sizes[2], batch_dims=1) # shape (batch_size, seq_len) full_index = ProductIndexMap(col_index, row_index) # shape (max_rows * max_columns,). First absolute position for every cell first_position_per_segment = reduce_min(position_ids, full_index)[0] # ? shape (batch_size, seq_len). First absolute position of the cell for every token first_position = gather(first_position_per_segment, full_index) # shape (1, seq_len) position = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0) position_ids = tf.math.minimum(self.max_position_embeddings - 1, position - first_position) if input_ids is not None: inputs_embeds = tf.gather(params=self.weight, indices=input_ids) position_embeddings = tf.gather(self.position_embeddings, indices=position_ids) final_embeddings = inputs_embeds + position_embeddings for i in range(self.number_of_token_type_embeddings): name = f"token_type_embeddings_{i}" final_embeddings += tf.gather(params=getattr(self, name), indices=token_type_ids[:, :, i]) final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Tapas class TFTapasSelfAttention(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, 
self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFTapasModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
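        # In other words, dropout here zeroes whole entries of the (already
        # normalized) attention distribution, so for that forward pass a query
        # simply cannot attend to the dropped key positions; the remaining
        # probabilities are rescaled by 1 / (1 - dropout_rate) during training.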
attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Tapas class TFTapasSelfOutput(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Tapas class TFTapasAttention(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFTapasSelfAttention(config, name="self") self.dense_output = TFTapasSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: self_outputs = self.self_attention( hidden_states=input_tensor, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) # add attentions (possibly with past_key_value) if we output them outputs = (attention_output,) + self_outputs[1:] return outputs # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Tapas class TFTapasIntermediate(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Tapas class TFTapasOutput(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): 
super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Tapas class TFTapasLayer(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.attention = TFTapasAttention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TFTapasAttention(config, name="crossattention") self.intermediate = TFTapasIntermediate(config, name="intermediate") self.bert_output = TFTapasOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: Optional[tf.Tensor], encoder_attention_mask: Optional[tf.Tensor], past_key_value: Optional[Tuple[tf.Tensor]], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = 
self.intermediate(hidden_states=attention_output) layer_output = self.bert_output( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + outputs # add attentions if we output them # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Tapas class TFTapasEncoder(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFTapasLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: Optional[tf.Tensor], encoder_attention_mask: Optional[tf.Tensor], past_key_values: Optional[Tuple[Tuple[tf.Tensor]]], use_cache: Optional[bool], output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Tapas class TFTapasPooler(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
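        # Shape note: hidden_states is (batch_size, seq_len, hidden_size); the
        # slice below keeps only the first ([CLS]) position, giving
        # (batch_size, hidden_size) before the tanh-activated dense projection.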
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Tapas class TFTapasPredictionHeadTransform(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Tapas class TFTapasLMPredictionHead(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.hidden_size = config.hidden_size self.transform = TFTapasPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.input_embeddings = input_embeddings def build(self, input_shape: tf.TensorShape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def get_output_embeddings(self) -> tf.keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Tapas class TFTapasMLMHead(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFTapasLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores @keras_serializable class TFTapasMainLayer(tf.keras.layers.Layer): config_class = TapasConfig def __init__(self, config: TapasConfig, add_pooling_layer: bool = True, **kwargs): requires_backends(self, "tensorflow_probability") super().__init__(**kwargs) self.config = config self.embeddings = TFTapasEmbeddings(config, name="embeddings") self.encoder = TFTapasEncoder(config, name="encoder") self.pooler = 
TFTapasPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self) -> tf.keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape + [len(self.config.type_vocab_sizes)], value=0) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
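        # Illustrative sketch (not executed): a padding mask row [1, 1, 0]
        # becomes (1 - [1, 1, 0]) * -10000 = [0, 0, -10000], which is added to
        # the attention scores so that padded positions receive ~zero attention
        # weight after the softmax.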
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TFTapasPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = TapasConfig base_model_prefix = "tapas" @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.float32, name="attention_mask"), "token_type_ids": tf.TensorSpec((None, None, None), tf.int64, name="token_type_ids"), } ] ) def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) TAPAS_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`TapasConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TAPAS_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`TapasTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0}, 7)`, *optional*): Token indices that encode tabular structure. Indices can be obtained using [`TapasTokenizer`]. See this class for more info. [What are token type IDs?](../glossary#token-type-ids) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. If `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be used. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare Tapas Model transformer outputting raw hidden-states without any specific head on top.", TAPAS_START_DOCSTRING, ) class TFTapasModel(TFTapasPreTrainedModel): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.tapas = TFTapasMainLayer(config, name="tapas") @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: r""" Returns: Examples: ```python >>> from transformers import TapasTokenizer, TapasModel >>> import pandas as pd >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base") >>> model = TapasModel.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... 
} >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling: hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutputWithPooling( last_hidden_state=output.last_hidden_state, pooler_output=output.pooler_output, hidden_states=hidden_states, attentions=attentions, ) @add_start_docstrings("""Tapas Model with a `language modeling` head on top.""", TAPAS_START_DOCSTRING) class TFTapasForMaskedLM(TFTapasPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if config.is_decoder: logger.warning( "If you want to use `TFTapasForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.tapas = TFTapasMainLayer(config, add_pooling_layer=False, name="tapas") self.lm_head = TFTapasMLMHead(config, input_embeddings=self.tapas.embeddings, name="cls") def get_lm_head(self) -> tf.keras.layers.Layer: return self.lm_head.predictions @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import TapasTokenizer, TapasForMaskedLM >>> import pandas as pd >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base") >>> model = TapasForMaskedLM.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> inputs = tokenizer( ... 
table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="tf" ... ) >>> labels = tokenizer( ... table=table, queries="How many movies has George Clooney played in?", return_tensors="tf" ... )["input_ids"] >>> outputs = model(**inputs, labels=labels) >>> logits = outputs.logits ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput: hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFMaskedLMOutput(logits=output.logits, hidden_states=hidden_states, attentions=attentions) class TFTapasComputeTokenLogits(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.temperature = config.temperature # cell selection heads with tf.name_scope("output"): self.output_weights = self.add_weight( name="output_weights", shape=(config.hidden_size,), dtype=tf.float32, trainable=True, initializer=tf.zeros_initializer() if config.init_cell_selection_weights_to_zero else tf.keras.initializers.TruncatedNormal(stddev=config.initializer_range), ) self.output_bias = self.add_weight( name="output_bias", shape=(), trainable=True, initializer=tf.zeros_initializer() ) def call(self, sequence_output: tf.Tensor) -> tf.Tensor: """ Computes logits per token Args: sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. Returns: logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Logits per token. """ logits = (tf.einsum("bsj,j->bs", sequence_output, self.output_weights) + self.output_bias) / self.temperature return logits class TFTapasComputeColumnLogits(tf.keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) with tf.name_scope("column_output"): self.column_output_weights = self.add_weight( name="column_output_weights", shape=[config.hidden_size], dtype=tf.float32, trainable=True, initializer=tf.zeros_initializer() if config.init_cell_selection_weights_to_zero else tf.keras.initializers.TruncatedNormal(stddev=config.initializer_range), ) self.column_output_bias = self.add_weight( name="column_output_bias", shape=(), trainable=True, initializer=tf.zeros_initializer() ) def call(self, sequence_output, cell_index, cell_mask, allow_empty_column_selection) -> tf.Tensor: """ Computes the column logits. Args: sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. cell_index (`ProductIndexMap`): Index that groups tokens into cells. 
cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). allow_empty_column_selection (`bool`): Whether to allow not to select any column Returns: column_logits (`tf.Tensor`of shape `(batch_size, max_num_cols)`): Tensor containing the column logits for every example in the batch. """ # First, compute the token logits (batch_size, seq_len) - without temperature token_logits = tf.einsum("bsj,j->bs", sequence_output, self.column_output_weights) + self.column_output_bias # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows) cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index) # Finally, average the logits per column (batch_size, max_num_cols) column_index = cell_index.project_inner(cell_logits_index) column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index) cell_count, _ = reduce_sum(cell_mask, column_index) column_logits /= cell_count + EPSILON_ZERO_DIVISION # Mask columns that do not appear in the example. is_padding = tf.logical_and(cell_count < 0.5, tf.not_equal(out_index.indices, 0)) column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(is_padding, tf.float32) if not allow_empty_column_selection: column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(tf.equal(out_index.indices, 0), tf.float32) return column_logits @add_start_docstrings( """ Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for SQA, WTQ or WikiSQL-supervised tasks. """, TAPAS_START_DOCSTRING, ) class TFTapasForQuestionAnswering(TFTapasPreTrainedModel): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) # base model self.tapas = TFTapasMainLayer(config, name="tapas") # dropout self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.compute_token_logits = TFTapasComputeTokenLogits(config, name="compute_token_logits") self.compute_column_logits = TFTapasComputeColumnLogits(config, name="compute_column_logits") if config.num_aggregation_labels > 0: self.aggregation_classifier = tf.keras.layers.Dense( config.num_aggregation_labels, kernel_initializer=get_initializer(config.initializer_range), name="aggregation_classifier", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFTableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, table_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, aggregation_labels: Optional[Union[np.ndarray, tf.Tensor]] = None, float_answer: Optional[Union[np.ndarray, tf.Tensor]] = None, numeric_values: Optional[Union[np.ndarray, tf.Tensor]] = None, numeric_values_scale: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> 
Union[TFTableQuestionAnsweringOutput, Tuple[tf.Tensor]]: r""" table_mask (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and padding are 0. labels (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the answer appearing in the table. Can be obtained using [`TapasTokenizer`]. - 1 for tokens that are **part of the answer**, - 0 for tokens that are **not part of the answer**. aggregation_labels (`tf.Tensor` of shape `(batch_size, )`, *optional*): Aggregation function index for every example in the batch for computing the aggregation loss. Indices should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for aggregation (WikiSQL-supervised). float_answer (`tf.Tensor` of shape `(batch_size, )`, *optional*): Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss. numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using [`TapasTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Scale of the numeric values of every token. Can be obtained using [`TapasTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. Returns: Examples: ```python >>> from transformers import TapasTokenizer, TapasForQuestionAnswering >>> import pandas as pd >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") >>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> logits_aggregation = outputs.logits_aggregation ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] pooled_output = outputs[1] sequence_output = self.dropout(sequence_output) if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] # Construct indices for the table. if token_type_ids is None: token_type_ids = tf.fill(input_shape + [len(self.config.type_vocab_sizes)], 0) token_types = [ "segment_ids", "column_ids", "row_ids", "prev_labels", "column_ranks", "inv_column_ranks", "numeric_relations", ] row_ids = token_type_ids[:, :, token_types.index("row_ids")] column_ids = token_type_ids[:, :, token_types.index("column_ids")] # Construct indices for the table. 
row_index = IndexMap( indices=tf.minimum(tf.cast(row_ids, tf.int32), self.config.max_num_rows - 1), num_segments=self.config.max_num_rows, batch_dims=1, ) col_index = IndexMap( indices=tf.minimum(tf.cast(column_ids, tf.int32), self.config.max_num_columns - 1), num_segments=self.config.max_num_columns, batch_dims=1, ) cell_index = ProductIndexMap(row_index, col_index) # Masks. input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)[:-1] if attention_mask is None: attention_mask = tf.ones(input_shape) # Table cells only, without question tokens and table headers. if table_mask is None: table_mask = tf.where(row_ids > 0, tf.ones_like(row_ids), tf.zeros_like(row_ids)) # <float32>[batch_size, seq_length] input_mask_float = tf.cast(attention_mask, tf.float32) table_mask_float = tf.cast(table_mask, tf.float32) # Mask for cells that exist in the table (i.e. that are not padding). cell_mask, _ = reduce_mean(input_mask_float, cell_index) # Compute logits per token. These are used to select individual cells. logits = self.compute_token_logits(sequence_output) # Compute logits per column. These are used to select a column. column_logits = None if self.config.select_one_column: column_logits = self.compute_column_logits( sequence_output, cell_index, cell_mask, self.config.allow_empty_column_selection ) # Aggregate logits. logits_aggregation = None if self.config.num_aggregation_labels > 0: logits_aggregation = self.aggregation_classifier(pooled_output) # Total loss calculation total_loss = tf.zeros(shape=(1,), dtype=tf.float32) calculate_loss = False if labels is not None: calculate_loss = True is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision # Semi-supervised cell selection in case of no aggregation: # If the answer (the denotation) appears directly in the table we might # select the answer without applying any aggregation function. There are # some ambiguous cases, see utils._calculate_aggregate_mask for more info. # `aggregate_mask` is 1 for examples where we chose to aggregate and 0 # for examples where we chose to select the answer directly. # `labels` encodes the positions of the answer appearing in the table. if is_supervised: aggregate_mask = None else: if float_answer is not None: assert ( shape_list(labels)[0] == shape_list(float_answer)[0] ), "Make sure the answers are a FloatTensor of shape (batch_size,)" # <float32>[batch_size] aggregate_mask = _calculate_aggregate_mask( float_answer, pooled_output, self.config.cell_selection_preference, labels, self.aggregation_classifier, ) else: aggregate_mask = None raise ValueError("You have to specify float answers in order to calculate the aggregate mask") # Cell selection log-likelihood if self.config.average_logits_per_cell: logits_per_cell, _ = reduce_mean(logits, cell_index) logits = gather(logits_per_cell, cell_index) dist_per_token = tfp.distributions.Bernoulli(logits=logits) # Compute cell selection loss per example. 
selection_loss_per_example = None if not self.config.select_one_column: weight = tf.where( labels == 0, tf.ones_like(labels, dtype=tf.float32), self.config.positive_label_weight * tf.ones_like(labels, dtype=tf.float32), ) selection_loss_per_token = -dist_per_token.log_prob(labels) * weight selection_loss_per_example = tf.reduce_sum(selection_loss_per_token * input_mask_float, axis=1) / ( tf.reduce_sum(input_mask_float, axis=1) + EPSILON_ZERO_DIVISION ) else: selection_loss_per_example, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) dist_per_token = tfp.distributions.Bernoulli(logits=logits) # Supervised cell selection if self.config.disable_per_token_loss: pass elif is_supervised: total_loss += tf.reduce_mean(selection_loss_per_example) else: # For the not supervised case, do not assign loss for cell selection total_loss += tf.reduce_mean(selection_loss_per_example * (1.0 - aggregate_mask)) # Semi-supervised regression loss and supervised loss for aggregations if self.config.num_aggregation_labels > 0: if is_supervised: # Note that `aggregate_mask` is None if the setting is supervised. if aggregation_labels is not None: assert ( shape_list(labels)[0] == shape_list(aggregation_labels)[0] ), "Make sure the aggregation labels are a LongTensor of shape (batch_size,)" per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) else: raise ValueError( "You have to specify aggregation labels in order to calculate the aggregation loss" ) else: aggregation_labels = tf.zeros(shape_list(labels)[0], dtype=tf.int32) per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) if self.config.use_answer_as_supervision: if numeric_values is not None and numeric_values_scale is not None: assert shape_list(numeric_values) == shape_list(numeric_values_scale) # Add regression loss for numeric answers which require aggregation. answer_loss, large_answer_loss_mask = _calculate_regression_loss( float_answer, aggregate_mask, dist_per_token, numeric_values, numeric_values_scale, table_mask_float, logits_aggregation, self.config, ) per_example_additional_loss += answer_loss # Zero loss for examples with answer_loss > cutoff. 
per_example_additional_loss *= large_answer_loss_mask else: raise ValueError( "You have to specify numeric values and numeric values scale in order to calculate the" " regression loss" ) total_loss += tf.reduce_mean(per_example_additional_loss) else: # if no label ids are provided, set them to zeros in order to properly compute logits labels = tf.zeros_like(logits) _, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) if not return_dict: output = (logits, logits_aggregation) + outputs[2:] return ((total_loss,) + output) if calculate_loss else output return TFTableQuestionAnsweringOutput( loss=total_loss if calculate_loss else None, logits=logits, logits_aggregation=logits_aggregation, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output: TFTableQuestionAnsweringOutput) -> TFTableQuestionAnsweringOutput: hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFTableQuestionAnsweringOutput( logits=output.logits, logits_aggregation=output.logits_aggregation, hidden_states=hidden_states, attentions=attentions, ) @add_start_docstrings( """ Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table entailment tasks, such as TabFact (Chen et al., 2020). """, TAPAS_START_DOCSTRING, ) class TFTapasForSequenceClassification(TFTapasPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.tapas = TFTapasMainLayer(config, name="tapas") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called "classification_class_index" in the original implementation. 
Returns: Examples: ```python >>> from transformers import TapasTokenizer, TapasForSequenceClassification >>> import tensorflow as tf >>> import pandas as pd >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact") >>> model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = [ ... "There is only one actor who is 45 years old", ... "There are 3 actors which played in more than 60 movies", ... ] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") >>> labels = tf.convert_to_tensor([1, 0]) # 1 means entailed, 0 means refuted >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput: hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hidden_states, attentions=attentions) """ TAPAS utilities.""" class AverageApproximationFunction(str, enum.Enum): RATIO = "ratio" FIRST_ORDER = "first_order" SECOND_ORDER = "second_order" # Beginning of everything related to segmented tensors class IndexMap(object): """Index grouping entries within a tensor.""" def __init__(self, indices, num_segments, batch_dims=0): """ Creates an index. Args: indices: <int32> Tensor of indices, same shape as `values`. num_segments: <int32> Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same number of segments (although many segments can be empty). batch_dims: Python integer, the number of batch dimensions. The first `batch_dims` dimensions of a SegmentedTensor are treated as batch dimensions. Segments in different batch elements are always distinct even if they have the same index. """ self.indices = tf.convert_to_tensor(indices) self.num_segments = tf.convert_to_tensor(num_segments) self.batch_dims = batch_dims def batch_shape(self): return tf.shape(self.indices)[: self.batch_dims] class ProductIndexMap(IndexMap): """The product of two indices.""" def __init__(self, outer_index, inner_index): """ Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the intersection of segments i and j. 
For example if the inputs represent table cells indexed by respectively rows and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has `num_segments` equal to `outer_index.num_segements` * `inner_index.num_segments`. Args: outer_index: IndexMap. inner_index: IndexMap, must have the same shape as `outer_index`. """ if outer_index.batch_dims != inner_index.batch_dims: raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.") super(ProductIndexMap, self).__init__( indices=( inner_index.indices + outer_index.indices * tf.cast(inner_index.num_segments, inner_index.indices.dtype) ), num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims, ) self.outer_index = outer_index self.inner_index = inner_index def project_outer(self, index): """Projects an index with the same index set onto the outer components.""" return IndexMap( indices=tf.math.floordiv(index.indices, self.inner_index.num_segments), num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims, ) def project_inner(self, index): """Projects an index with the same index set onto the inner components.""" return IndexMap( indices=tf.math.floormod(index.indices, self.inner_index.num_segments), num_segments=self.inner_index.num_segments, batch_dims=index.batch_dims, ) def gather(values, index, name="segmented_gather"): """ Gathers from `values` using the index map. For each element in the domain of the index map this operation looks up a value for that index in `values`. Two elements from the same segment always get assigned the same value. Args: values: [B1, ..., Bn, num_segments, V1, ...] Tensor with segment values. index: [B1, ..., Bn, I1, ..., Ik] IndexMap. name: Name for the TensorFlow operation. Returns: [B1, ..., Bn, I1, ..., Ik, V1, ...] Tensor with the gathered values. """ return tf.gather(values, index.indices, batch_dims=index.batch_dims, name=name) def flatten(index, name="segmented_flatten"): """ Flattens a batched index map to a 1d index map. This operation relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by `num_segments` * (k - 1). The result is a tensor with `num_segments` multiplied by the number of elements in the batch. Args: index: IndexMap to flatten. name: Name for the TensorFlow operation. Returns: The flattened IndexMap. """ batch_size = tf.reduce_prod(index.batch_shape()) offset = tf.range(batch_size) * index.num_segments offset = tf.reshape(offset, index.batch_shape()) for _ in range(index.batch_dims, index.indices.shape.rank): offset = tf.expand_dims(offset, -1) indices = tf.cast(offset, index.indices.dtype) + index.indices return IndexMap(indices=tf.reshape(indices, [-1]), num_segments=index.num_segments * batch_size, batch_dims=0) def range_index_map(batch_shape, num_segments, name="range_index_map"): """ Constructs an index map equal to range(num_segments). Args: batch_shape (`tf.Tensor`): Batch shape num_segments (`int`): Number of segments name (`str`, *optional*, defaults to 'range_index_map'): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). 
""" batch_shape = tf.convert_to_tensor(batch_shape) batch_shape.shape.assert_has_rank(1) num_segments = tf.convert_to_tensor(num_segments) num_segments.shape.assert_has_rank(0) indices = tf.range(num_segments) shape = tf.concat([tf.ones_like(batch_shape, dtype=tf.int32), tf.expand_dims(num_segments, axis=0)], axis=0) indices = tf.reshape(indices, shape) multiples = tf.concat([batch_shape, [1]], axis=0) indices = tf.tile(indices, multiples) return IndexMap(indices=indices, num_segments=num_segments, batch_dims=batch_shape.shape.as_list()[0]) def _segment_reduce(values, index, segment_reduce_fn, name): """ Applies a segment reduction segment-wise. Args: values (`tf.Tensor`): Tensor with segment values. index (`IndexMap`): IndexMap. segment_reduce_fn (`str`): Name for the reduce operation. One of "sum", "mean", "max" or "min". name (`str`): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ # Flatten the batch dimensions, as segments ops do not support batching. # However if `values` has extra dimensions to the right keep them # unflattened. Segmented ops support vector-valued operations. flat_index = flatten(index) vector_shape = tf.shape(values)[index.indices.shape.rank :] flattened_shape = tf.concat([[-1], vector_shape], axis=0) flat_values = tf.reshape(values, flattened_shape) segment_means = segment_reduce_fn( data=flat_values, segment_ids=flat_index.indices, num_segments=flat_index.num_segments ) # Unflatten the values. new_shape = tf.concat([index.batch_shape(), [index.num_segments], vector_shape], axis=0) output_values = tf.reshape(segment_means, new_shape) output_index = range_index_map(index.batch_shape(), index.num_segments) return output_values, output_index def reduce_mean(values, index, name="segmented_reduce_mean"): """ Averages a tensor over its segments. Outputs 0 for empty segments. This operations computes the mean over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be a mean of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be averaged. index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments. name: Name for the TensorFlow ops. Returns: A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, tf.math.unsorted_segment_mean, name) def reduce_sum(values, index, name="segmented_reduce_sum"): """ Sums a tensor over its segments. Outputs 0 for empty segments. This operations computes the sum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be a sum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be averaged. index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments. name: Name for the TensorFlow ops. 
Returns: A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, tf.math.unsorted_segment_sum, name) def reduce_max(values, index, name="segmented_reduce_max"): """ Computes the maximum over segments. This operations computes the maximum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be an element-wise maximum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be averaged. index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments. name: Name for the TensorFlow ops. Returns: A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, tf.math.unsorted_segment_max, name) def reduce_min(values, index, name="segmented_reduce_min"): """Computes the minimum over segments.""" return _segment_reduce(values, index, tf.math.unsorted_segment_min, name) def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask): """ Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The model first predicts a column and then selects cells within that column (conditioned on the column). Cells outside the selected column are never selected. Args: token_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Tensor containing the logits per token. column_logits (`tf.Tensor` of shape `(batch_size, max_num_cols)`): Tensor containing the logits per column. labels (`tf.Tensor` of shape `(batch_size, sequence_length)`): Labels per token. cell_index (`ProductIndexMap`): Index that groups tokens into cells. col_index (`IndexMap`): Index that groups tokens into columns. cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). Returns: selection_loss_per_example (`tf.Tensor` of shape `(batch_size,)`): Loss for each example. logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): New logits which are only allowed to select cells in a single column. Logits outside of the most likely column according to *column_logits* will be set to a very low value (such that the probabilities are 0). """ # First find the column we should select. We use the column with maximum # number of selected cells. labels_per_column, _ = reduce_sum(tf.cast(labels, tf.float32), col_index) column_label = tf.argmax(labels_per_column, axis=-1, output_type=tf.int32) # Check if there are no selected cells in the column. In that case the model # should predict the special column id 0, which means "select nothing". no_cell_selected = tf.equal(tf.reduce_max(labels_per_column, axis=-1), 0) column_label = tf.where(no_cell_selected, tf.zeros_like(column_label), column_label) column_dist = tfp.distributions.Categorical(logits=column_logits) column_loss_per_example = -column_dist.log_prob(column_label) # Reduce the labels and logits to per-cell from per-token. 
logits_per_cell, _ = reduce_mean(token_logits, cell_index) labels_per_cell, labels_index = reduce_max(tf.cast(labels, tf.int32), cell_index) # Mask for the selected column. column_id_for_cells = cell_index.project_inner(labels_index).indices column_mask = tf.cast(tf.equal(column_id_for_cells, tf.expand_dims(column_label, axis=1)), tf.float32) # Compute the log-likelihood for cells, but only for the selected column. cell_dist = tfp.distributions.Bernoulli(logits=logits_per_cell) cell_log_prob = cell_dist.log_prob(labels_per_cell) cell_loss = -tf.reduce_sum(cell_log_prob * column_mask * cell_mask, axis=1) # We need to normalize the loss by the number of cells in the column. cell_loss /= tf.reduce_sum(column_mask * cell_mask, axis=1) + EPSILON_ZERO_DIVISION selection_loss_per_example = column_loss_per_example selection_loss_per_example += tf.where(no_cell_selected, tf.zeros_like(selection_loss_per_example), cell_loss) # Set the probs outside the selected column (selected by the *model*) # to 0. This ensures backwards compatibility with models that select # cells from multiple columns. selected_column_id = tf.argmax(column_logits, axis=-1, output_type=tf.int32) selected_column_mask = tf.cast( tf.equal(column_id_for_cells, tf.expand_dims(selected_column_id, axis=-1)), tf.float32 ) # Never select cells with the special column id 0. selected_column_mask = tf.where( tf.equal(column_id_for_cells, 0), tf.zeros_like(selected_column_mask), selected_column_mask ) logits_per_cell += CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask) logits = gather(logits_per_cell, cell_index) return selection_loss_per_example, logits def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier): """ Finds examples where the model should select cells with no aggregation. Returns a mask that determines for which examples should the model select answers directly from the table, without any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation case. The ambiguous case is when the answer is a number that also appears in the table. In this case we use the aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold for this is a hyperparameter *cell_selection_preference* Args: answer (`tf.Tensor` of shape `(batch_size, )`): Answer for every example in the batch. Nan if there is no scalar answer. pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Output of the pooler (BertPooler) on top of the encoder layer. cell_selection_preference (`float`): Preference for cell selection in ambiguous cases. labels (`tf.Tensor` of shape `(batch_size, sequence_length)`): Labels per token. aggregation_classifier (`torch.nn.Linear`): Aggregation head Returns: aggregate_mask (`tf.Tensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. """ # tf.Tensor(batch_size,) aggregate_mask_init = tf.cast(tf.logical_not(tf.math.is_nan(answer)), tf.float32) logits_aggregation = aggregation_classifier(pooled_output) dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1) # Cell selection examples according to current model. 
is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference # Examples with non-empty cell selection supervision. is_cell_supervision_available = tf.reduce_sum(labels, axis=1) > 0 aggregate_mask = tf.where( tf.logical_and(is_pred_cell_selection, is_cell_supervision_available), tf.zeros_like(aggregate_mask_init, dtype=tf.float32), aggregate_mask_init, ) aggregate_mask = tf.stop_gradient(aggregate_mask) return aggregate_mask def _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ): """ Calculates aggregation loss when its type is known during training. In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation" should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting where aggregation type is always known, standard cross entropy loss is accumulated for all examples Args: logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`tf.Tensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`tf.Tensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. Returns: aggregation_loss_known (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (when its type is known during training) per example. """ if use_answer_as_supervision: # Prepare "no aggregation" targets for cell selection examples. target_aggregation = tf.zeros_like(aggregate_mask, dtype=tf.int32) else: # Use aggregation supervision as the target. target_aggregation = aggregation_labels one_hot_labels = tf.one_hot(target_aggregation, depth=num_aggregation_labels, dtype=tf.float32) log_probs = tf.nn.log_softmax(logits_aggregation, axis=-1) # <float32>[batch_size] per_example_aggregation_intermediate = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) if use_answer_as_supervision: # Accumulate loss only for examples requiring cell selection # (no aggregation). return per_example_aggregation_intermediate * (1 - aggregate_mask) else: return per_example_aggregation_intermediate def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask): """ Calculates aggregation loss in the case of answer supervision. Args: logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`tf.Tensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions Returns: aggregation_loss_unknown (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (in case of answer supervision) per example. """ dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1) # Predict some aggregation in case of an answer that needs aggregation. # This increases the probability of all aggregation functions, in a way # similar to MML, but without considering whether the function gives the # correct answer. 
return -tf.math.log(aggregation_ops_total_mass) * aggregate_mask def _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels, aggregation_loss_weight, ): """ Calculates the aggregation loss per example. Args: logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`tf.Tensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`tf.Tensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. aggregation_loss_weight (`float`, *optional*, defaults to 1.0): Importance weight for the aggregation loss. Returns: aggregation_loss (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss per example. """ per_example_aggregation_loss = _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ) if use_answer_as_supervision: # Add aggregation loss for numeric answers that need aggregation. per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask) return aggregation_loss_weight * per_example_aggregation_loss def _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ): """ Calculates the expected result given cell and aggregation probabilities. Args: dist_per_cell (`tfp.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the hyperparameters of the model Returns: expected_result (`tf.Tensor` of shape `(batch_size,)`): The expected result per example. """ if config.use_gumbel_for_cells: gumbel_dist = tfp.distributions.RelaxedBernoulli( # The token logits where already divided by the temperature and used for # computing cell selection errors so we need to multiply it again here config.temperature, logits=dist_per_cell.logits_parameter() * config.temperature, ) scaled_probability_per_cell = gumbel_dist.sample() else: scaled_probability_per_cell = dist_per_cell.probs_parameter() # <float32>[batch_size, seq_length] scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float count_result = tf.reduce_sum(scaled_probability_per_cell, axis=1) numeric_values_masked = tf.where( tf.math.is_nan(numeric_values), tf.zeros_like(numeric_values), numeric_values ) # Mask non-numeric table values to zero. 
sum_result = tf.reduce_sum(scaled_probability_per_cell * numeric_values_masked, axis=1) avg_approximation = config.average_approximation_function if avg_approximation == AverageApproximationFunction.RATIO: average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION) elif avg_approximation == AverageApproximationFunction.FIRST_ORDER: # The sum of all probabilities exept that correspond to other cells ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1 average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell / ex, axis=1) elif avg_approximation == AverageApproximationFunction.SECOND_ORDER: # The sum of all probabilities exept that correspond to other cells ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1 pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell) var = tf.reduce_sum(pointwise_var, axis=1, keepdims=True) - pointwise_var multiplier = (var / tf.math.square(ex) + 1) / ex average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell * multiplier, axis=1) else: raise ValueError("Invalid average_approximation_function: %s", config.average_approximation_function) if config.use_gumbel_for_aggregation: gumbel_dist = tfp.distributions.RelaxedOneHotCategorical( config.aggregation_temperature, logits=logits_aggregation[:, 1:] ) # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = gumbel_dist.sample() else: # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = stable_softmax(logits_aggregation[:, 1:] / config.aggregation_temperature, axis=-1) all_results = tf.concat( [ tf.expand_dims(sum_result, axis=1), tf.expand_dims(average_result, axis=1), tf.expand_dims(count_result, axis=1), ], axis=1, ) expected_result = tf.reduce_sum(all_results * aggregation_op_only_probs, axis=1) return expected_result def _calculate_regression_loss( answer, aggregate_mask, dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config, ): """ Calculates the regression loss per example. Args: answer (`tf.Tensor` of shape `(batch_size,)`): Answer for every example in the batch. Nan if there is no scalar answer. aggregate_mask (`tf.Tensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. dist_per_cell (`torch.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the parameters of the model Returns: per_example_answer_loss_scaled (`tf.Tensor` of shape `(batch_size,)`): Scales answer loss for each example in the batch. large_answer_loss_mask (`tf.Tensor` of shape `(batch_size,)`): A mask which is 1 for examples for which their answer loss is larger than the answer_loss_cutoff. 
""" # float32 (batch_size,) expected_result = _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ) # <float32>[batch_size] answer_masked = tf.where(tf.math.is_nan(answer), tf.zeros_like(answer), answer) if config.use_normalized_answer_loss: normalizer = tf.stop_gradient( tf.math.maximum(tf.math.abs(expected_result), tf.math.abs(answer_masked)) + EPSILON_ZERO_DIVISION ) normalized_answer_masked = answer_masked / normalizer normalized_expected_result = expected_result / normalizer per_example_answer_loss = tf.compat.v1.losses.huber_loss( normalized_answer_masked * aggregate_mask, normalized_expected_result * aggregate_mask, delta=tf.cast(1.0, tf.float32), reduction=tf.losses.Reduction.NONE, ) else: per_example_answer_loss = tf.compat.v1.losses.huber_loss( answer_masked * aggregate_mask, expected_result * aggregate_mask, delta=tf.cast(config.huber_loss_delta, tf.float32), reduction=tf.losses.Reduction.NONE, ) if config.answer_loss_cutoff is None: large_answer_loss_mask = tf.ones_like(per_example_answer_loss, dtype=tf.float32) else: large_answer_loss_mask = tf.where( per_example_answer_loss > config.answer_loss_cutoff, tf.zeros_like(per_example_answer_loss, dtype=tf.float32), tf.ones_like(per_example_answer_loss, dtype=tf.float32), ) per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/generation/test_beam_constraints.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class ConstraintTest(unittest.TestCase): def test_input_types(self): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. cset = [[1, 2, 4], [1, 2, 3, 4]] dc = DisjunctiveConstraint(cset) self.assertTrue(isinstance(dc.token_ids, list)) with self.assertRaises(ValueError): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]])) with self.assertRaises(ValueError): DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])]) def test_check_illegal_input(self): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). cset = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(ValueError): DisjunctiveConstraint(cset) # fails here def test_example_progression(self): cset = [[1, 2, 3], [1, 2, 4]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(3) desired = stepped is True and completed is True and reset is False self.assertTrue(desired) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3]) def test_example_progression_unequal_three_mid_and_reset(self): cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(4) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2, 4]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5]) dc.reset() stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 3) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 2) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.remaining() == 0) self.assertTrue(dc.current_seq == [1, 2, 5])
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class ConstraintTest(unittest.TestCase): def test_input_types(self): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. cset = [[1, 2, 4], [1, 2, 3, 4]] dc = DisjunctiveConstraint(cset) self.assertTrue(isinstance(dc.token_ids, list)) with self.assertRaises(ValueError): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]])) with self.assertRaises(ValueError): DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])]) def test_check_illegal_input(self): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). cset = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(ValueError): DisjunctiveConstraint(cset) # fails here def test_example_progression(self): cset = [[1, 2, 3], [1, 2, 4]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(3) desired = stepped is True and completed is True and reset is False self.assertTrue(desired) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3]) def test_example_progression_unequal_three_mid_and_reset(self): cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(4) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2, 4]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5]) dc.reset() stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 3) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 2) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.remaining() == 0) self.assertTrue(dc.current_seq == [1, 2, 5])
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
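To make the described failure concrete, the sketch below is a hedged illustration rather than the actual patch from this PR: `rescale_boxes` and its argument names are assumptions used only to show why tensors built from a plain Python list land on CPU and how moving the scale factor onto the predictions' device avoids the mismatch.

```python
# Hedged sketch, not the real post-processing code from transformers: the helper
# name and signature are assumed; only the device-handling pattern matters here.
from typing import List, Union

import torch


def rescale_boxes(boxes: torch.Tensor, target_sizes: Union[torch.Tensor, List[List[int]]]) -> torch.Tensor:
    """Scale normalized boxes of shape (batch, num_queries, 4) to pixel coordinates."""
    if isinstance(target_sizes, List):
        # torch.Tensor(...) on a Python list always allocates on CPU.
        img_h = torch.Tensor([i[0] for i in target_sizes])
        img_w = torch.Tensor([i[1] for i in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)

    # The fix: move the scale factors onto the device the predictions live on;
    # multiplying CUDA boxes by a CPU tensor would raise a device-mismatch error.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
    return boxes * scale_fct[:, None, :]


if __name__ == "__main__":
    boxes = torch.rand(2, 5, 4)  # in practice these sit on the same device as the model
    print(rescale_boxes(boxes, [[480, 640], [320, 320]]).shape)  # torch.Size([2, 5, 4])
```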
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/vit_msn/modeling_vit_msn.py
# coding=utf-8 # Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch ViT MSN (masked siamese network) model.""" import collections.abc import math from typing import Dict, List, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_vit_msn import ViTMSNConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "ViTMSNConfig" _CHECKPOINT_FOR_DOC = "facebook/vit-msn-small" VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/vit-msn-small", # See all ViTMSN models at https://huggingface.co/models?filter=vit_msn ] class ViTMSNEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. Optionally, also the mask token. """ def __init__(self, config: ViTMSNConfig, use_mask_token: bool = False) -> None: super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None self.patch_embeddings = ViTMSNPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. 
Source: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 if num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, 0] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] patch_window_height = height // self.config.patch_size patch_window_width = width // self.config.patch_size # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 patch_window_height, patch_window_width = patch_window_height + 0.1, patch_window_width + 0.1 patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, scale_factor=( patch_window_height / math.sqrt(num_positions), patch_window_width / math.sqrt(num_positions), ), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) def forward( self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: bool = False, ) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) if bool_masked_pos is not None: seq_length = embeddings.shape[1] mask_tokens = self.mask_token.expand(batch_size, seq_length, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask # add the [CLS] token to the embedded patch tokens cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.vit.modeling_vit.ViTPatchEmbeddings with ViT->ViTMSN class ViTMSNPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
""" def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->ViTMSN class ViTMSNSelfAttention(nn.Module): def __init__(self, config: ViTMSNConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->ViTMSN class ViTMSNSelfOutput(nn.Module): """ The residual connection is defined in ViTMSNLayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->ViTMSN class ViTMSNAttention(nn.Module): def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.attention = ViTMSNSelfAttention(config) self.output = ViTMSNSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->ViTMSN class ViTMSNIntermediate(nn.Module): def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->ViTMSN class ViTMSNOutput(nn.Module): def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = 
nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->ViTMSN class ViTMSNLayer(nn.Module): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ViTMSNAttention(config) self.intermediate = ViTMSNIntermediate(config) self.output = ViTMSNOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in ViTMSN, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states # in ViTMSN, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->ViTMSN class ViTMSNEncoder(nn.Module): def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([ViTMSNLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, layer_head_mask, ) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class ViTMSNPreTrainedModel(PreTrainedModel): """ An abstract class to 
handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ViTMSNConfig base_model_prefix = "vit" main_input_name = "pixel_values" supports_gradient_checkpointing = True # todo: Resort to https://github.com/facebookresearch/msn/blob/main/src/deit.py#L200-#L211 # when creating pre-training scripts. def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module: ViTMSNEncoder, value: bool = False) -> None: if isinstance(module, ViTMSNEncoder): module.gradient_checkpointing = value VIT_MSN_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ViTMSNConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ VIT_MSN_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See [`AutoFeatureExtractor.__call__`] for details. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. interpolate_pos_encoding (`bool`, *optional*): Whether to interpolate the pre-trained position encodings. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare ViTMSN Model outputting raw hidden-states without any specific head on top.", VIT_MSN_START_DOCSTRING, ) class ViTMSNModel(ViTMSNPreTrainedModel): def __init__(self, config: ViTMSNConfig, use_mask_token: bool = False): super().__init__(config) self.config = config self.embeddings = ViTMSNEmbeddings(config, use_mask_token=use_mask_token) self.encoder = ViTMSNEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> ViTMSNPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(VIT_MSN_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoFeatureExtractor, ViTMSNModel >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-msn-small") >>> model = ViTMSNModel.from_pretrained("facebook/vit-msn-small") >>> inputs = feature_extractor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding ) encoder_outputs = self.encoder( embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) if not return_dict: head_outputs = (sequence_output,) return head_outputs + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) # Caution: We don't have the weights for the classification head yet. This class # is here for the users that are interested to fine-tune the base model (ViTMSNModel). @add_start_docstrings( """ ViTMSN Model with an image classification head on top e.g. for ImageNet. 
""", VIT_MSN_START_DOCSTRING, ) class ViTMSNForImageClassification(ViTMSNPreTrainedModel): def __init__(self, config: ViTMSNConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.vit = ViTMSNModel(config) # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(VIT_MSN_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoFeatureExtractor, ViTMSNForImageClassification >>> import torch >>> from PIL import Image >>> import requests >>> torch.manual_seed(2) # doctest: +IGNORE_RESULT >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-msn-small") >>> model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small") >>> inputs = feature_extractor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) Kerry blue terrier ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.vit( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output[:, 0, :]) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
# coding=utf-8 # Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch ViT MSN (masked siamese network) model.""" import collections.abc import math from typing import Dict, List, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_vit_msn import ViTMSNConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "ViTMSNConfig" _CHECKPOINT_FOR_DOC = "facebook/vit-msn-small" VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/vit-msn-small", # See all ViTMSN models at https://huggingface.co/models?filter=vit_msn ] class ViTMSNEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. Optionally, also the mask token. """ def __init__(self, config: ViTMSNConfig, use_mask_token: bool = False) -> None: super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None self.patch_embeddings = ViTMSNPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. 
Source: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 if num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, 0] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] patch_window_height = height // self.config.patch_size patch_window_width = width // self.config.patch_size # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 patch_window_height, patch_window_width = patch_window_height + 0.1, patch_window_width + 0.1 patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, scale_factor=( patch_window_height / math.sqrt(num_positions), patch_window_width / math.sqrt(num_positions), ), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) def forward( self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: bool = False, ) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) if bool_masked_pos is not None: seq_length = embeddings.shape[1] mask_tokens = self.mask_token.expand(batch_size, seq_length, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask # add the [CLS] token to the embedded patch tokens cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.vit.modeling_vit.ViTPatchEmbeddings with ViT->ViTMSN class ViTMSNPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
""" def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->ViTMSN class ViTMSNSelfAttention(nn.Module): def __init__(self, config: ViTMSNConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->ViTMSN class ViTMSNSelfOutput(nn.Module): """ The residual connection is defined in ViTMSNLayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->ViTMSN class ViTMSNAttention(nn.Module): def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.attention = ViTMSNSelfAttention(config) self.output = ViTMSNSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->ViTMSN class ViTMSNIntermediate(nn.Module): def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->ViTMSN class ViTMSNOutput(nn.Module): def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = 
nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->ViTMSN class ViTMSNLayer(nn.Module): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ViTMSNAttention(config) self.intermediate = ViTMSNIntermediate(config) self.output = ViTMSNOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in ViTMSN, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states # in ViTMSN, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->ViTMSN class ViTMSNEncoder(nn.Module): def __init__(self, config: ViTMSNConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([ViTMSNLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, layer_head_mask, ) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class ViTMSNPreTrainedModel(PreTrainedModel): """ An abstract class to 
handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ViTMSNConfig base_model_prefix = "vit" main_input_name = "pixel_values" supports_gradient_checkpointing = True # todo: Resort to https://github.com/facebookresearch/msn/blob/main/src/deit.py#L200-#L211 # when creating pre-training scripts. def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module: ViTMSNEncoder, value: bool = False) -> None: if isinstance(module, ViTMSNEncoder): module.gradient_checkpointing = value VIT_MSN_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ViTMSNConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ VIT_MSN_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See [`AutoFeatureExtractor.__call__`] for details. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. interpolate_pos_encoding (`bool`, *optional*): Whether to interpolate the pre-trained position encodings. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare ViTMSN Model outputting raw hidden-states without any specific head on top.", VIT_MSN_START_DOCSTRING, ) class ViTMSNModel(ViTMSNPreTrainedModel): def __init__(self, config: ViTMSNConfig, use_mask_token: bool = False): super().__init__(config) self.config = config self.embeddings = ViTMSNEmbeddings(config, use_mask_token=use_mask_token) self.encoder = ViTMSNEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> ViTMSNPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(VIT_MSN_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoFeatureExtractor, ViTMSNModel >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-msn-small") >>> model = ViTMSNModel.from_pretrained("facebook/vit-msn-small") >>> inputs = feature_extractor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding ) encoder_outputs = self.encoder( embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) if not return_dict: head_outputs = (sequence_output,) return head_outputs + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) # Caution: We don't have the weights for the classification head yet. This class # is here for the users that are interested to fine-tune the base model (ViTMSNModel). @add_start_docstrings( """ ViTMSN Model with an image classification head on top e.g. for ImageNet. 
""", VIT_MSN_START_DOCSTRING, ) class ViTMSNForImageClassification(ViTMSNPreTrainedModel): def __init__(self, config: ViTMSNConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.vit = ViTMSNModel(config) # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(VIT_MSN_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoFeatureExtractor, ViTMSNForImageClassification >>> import torch >>> from PIL import Image >>> import requests >>> torch.manual_seed(2) # doctest: +IGNORE_RESULT >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-msn-small") >>> model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small") >>> inputs = feature_extractor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) Kerry blue terrier ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.vit( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output[:, 0, :]) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./tests/models/donut/__init__.py
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./src/transformers/models/convnext/feature_extraction_convnext.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for ConvNeXT.""" from ...utils import logging from .image_processing_convnext import ConvNextImageProcessor logger = logging.get_logger(__name__) ConvNextFeatureExtractor = ConvNextImageProcessor
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for ConvNeXT.""" from ...utils import logging from .image_processing_convnext import ConvNextImageProcessor logger = logging.get_logger(__name__) ConvNextFeatureExtractor = ConvNextImageProcessor
-1
huggingface/transformers
20,304
fix device issue
# What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
ydshieh
"2022-11-17T17:48:16Z"
"2022-11-21T09:12:26Z"
d316037ad71f8748aac9045ffd96970826456a04
8503cc755050c6ed5bc771e3244c29b71be1841e
fix device issue. # What does this PR do? When this block is run ``` if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) ``` `scale_fct` (defined a few lines below) is always on `cpu`. We need to put it on the proper device.
./examples/research_projects/seq2seq-distillation/run_eval.py
#!/usr/bin/env python

import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return dict(n_obs=n_obs, runtime=runtime, seconds_per_sample=round(runtime / n_obs, 4))


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """
    Takes input text, generates output, and then using reference calculates the BLEU scores. The results are saved to
    a file and returned to the caller, and printed out unless ``verbose=False`` is passed.

    Args:
        verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout

    Returns:
        a tuple: ``(scores, params)``
        - ``scores``: a dict of scores data ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}``
        - ``params``: a dict of custom params, e.g. ``{'num_beams': 5, 'length_penalty': 0.8}``
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    with open(args.input_path) as f:
        examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in f.readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
#!/usr/bin/env python

import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return dict(n_obs=n_obs, runtime=runtime, seconds_per_sample=round(runtime / n_obs, 4))


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """
    Takes input text, generates output, and then using reference calculates the BLEU scores. The results are saved to
    a file and returned to the caller, and printed out unless ``verbose=False`` is passed.

    Args:
        verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout

    Returns:
        a tuple: ``(scores, params)``
        - ``scores``: a dict of scores data ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}``
        - ``params``: a dict of custom params, e.g. ``{'num_beams': 5, 'length_penalty': 0.8}``
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    with open(args.input_path) as f:
        examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in f.readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
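For orientation, a hedged sketch of driving the helper above programmatically rather than through argparse; the checkpoint name and file paths are placeholders, and `run_eval` is assumed to be importable from the working directory:

```python
# Hypothetical programmatic use of generate_summaries_or_translations (bypassing the CLI).
from run_eval import generate_summaries_or_translations

stats = generate_summaries_or_translations(
    examples=["A long source document to summarize ..."],
    out_file="preds.txt",                        # one generated hypothesis per line
    model_name="sshleifer/distilbart-cnn-12-6",  # placeholder seq2seq checkpoint
    batch_size=4,
    num_beams=4,                                 # forwarded to model.generate via **generate_kwargs
)
print(stats)  # {'n_obs': 1, 'runtime': ..., 'seconds_per_sample': ...}
```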
-1