Dataset schema: text (string, 29-317k chars), id (string, 22-166 chars), metadata (dict), __index_level_0__ (int64, 0-231). Each record below lists a source file's contents (text), its repository-path id, a metadata dict, and its index.
{
    "fp16": {
        "enabled": "auto",
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "betas": "auto",
            "eps": "auto",
            "weight_decay": "auto"
        }
    },
    "scheduler": {
        "type": "WarmupLR",
        "params": {
            "warmup_min_lr": "auto",
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto"
        }
    },
    "zero_optimization": {
        "stage": 2,
        "offload_optimizer": {
            "device": "cpu",
            "pin_memory": true
        },
        "find_unused_parameters": true,
        "allgather_partitions": true,
        "allgather_bucket_size": 2e8,
        "overlap_comm": true,
        "reduce_scatter": true,
        "reduce_bucket_size": 2e8,
        "contiguous_gradients": true
    },
    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto",
    "steps_per_print": 2000,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
}
transformers/examples/research_projects/wav2vec2/ds_config_wav2vec2_zero2.json/0
{ "file_path": "transformers/examples/research_projects/wav2vec2/ds_config_wav2vec2_zero2.json", "repo_id": "transformers", "token_count": 669 }
79
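A DeepSpeed ZeRO-2 config like the one above leaves most numeric fields set to `"auto"`, which the Hugging Face `Trainer` resolves from its own arguments when the JSON is passed through the `deepspeed` option. A minimal sketch of that wiring follows; the output directory and batch-size values are made-up placeholders, not part of this record:

```python
# Sketch: feeding the ZeRO-2 JSON above to the Hugging Face Trainer.
# The "auto" fields in the config are filled from these TrainingArguments.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="wav2vec2-finetuned",            # hypothetical output path
    per_device_train_batch_size=4,              # hypothetical batch size
    gradient_accumulation_steps=2,
    fp16=True,                                  # matches the "fp16" block in the config
    deepspeed="ds_config_wav2vec2_zero2.json",  # path to the config shown above
)
# Trainer(model=..., args=training_args, train_dataset=...) then launches DeepSpeed internally.
```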
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script for training a masked language model on TPU.""" import argparse import logging import os import re import tensorflow as tf from packaging.version import parse from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) try: import tf_keras as keras except (ModuleNotFoundError, ImportError): import keras if parse(keras.__version__).major > 2: raise ValueError( "Your currently installed version of Keras is Keras 3, but this is not yet supported in " "Transformers. Please install the backwards-compatible tf-keras package with " "`pip install tf-keras`." ) logger = logging.getLogger(__name__) AUTO = tf.data.AUTOTUNE def parse_args(): parser = argparse.ArgumentParser(description="Train a masked language model on TPU.") parser.add_argument( "--pretrained_model_config", type=str, default="FacebookAI/roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", ) parser.add_argument( "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", ) parser.add_argument( "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", ) parser.add_argument( "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", ) parser.add_argument( "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", ) parser.add_argument( "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", ) parser.add_argument( "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes." ) parser.add_argument( "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", ) parser.add_argument( "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket.", ) parser.add_argument( "--shuffle_buffer_size", type=int, default=2**18, # Default corresponds to a 1GB buffer for seq_len 512 help="Size of the shuffle buffer (in samples)", ) parser.add_argument( "--eval_dataset", type=str, help="Path to evaluation dataset to load. 
If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket.", ) parser.add_argument( "--num_epochs", type=int, default=1, help="Number of epochs to train for.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", ) parser.add_argument( "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", ) parser.add_argument( "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", ) parser.add_argument( "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", ) parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.") parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.") args = parser.parse_args() return args def initialize_tpu(args): try: if args.tpu_name: tpu = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name, zone=args.tpu_zone, project=args.gcp_project ) else: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or " "--gcp_project. When running on a TPU VM, use --tpu_name local." ) tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) return tpu def count_samples(file_list): num_samples = 0 for file in file_list: filename = file.split("/")[-1] sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1) sample_count = int(sample_count) num_samples += sample_count return num_samples def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None): num_samples = count_samples(records) dataset = tf.data.Dataset.from_tensor_slices(records) if shuffle: dataset = dataset.shuffle(len(dataset)) dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples)) dataset = dataset.map(decode_fn, num_parallel_calls=AUTO) if shuffle: assert shuffle_buffer_size is not None dataset = dataset.shuffle(args.shuffle_buffer_size) dataset = dataset.batch(batch_size, drop_remainder=True) dataset = dataset.map(mask_fn, num_parallel_calls=AUTO) dataset = dataset.prefetch(AUTO) return dataset def main(args): if not args.no_tpu: tpu = initialize_tpu(args) strategy = tf.distribute.TPUStrategy(tpu) else: strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") if args.bfloat16: keras.mixed_precision.set_global_policy("mixed_bfloat16") tokenizer = AutoTokenizer.from_pretrained(args.tokenizer) config = AutoConfig.from_pretrained(args.pretrained_model_config) config.vocab_size = tokenizer.vocab_size training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord")) if not training_records: raise ValueError(f"No .tfrecord files found in {args.train_dataset}.") eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord")) if not eval_records: raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.") num_train_samples = count_samples(training_records) steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) total_train_steps = steps_per_epoch * args.num_epochs with 
strategy.scope(): model = TFAutoModelForMaskedLM.from_config(config) model(model.dummy_inputs) # Pass some dummy inputs through the model to ensure all the weights are built optimizer, schedule = create_optimizer( num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=optimizer, metrics=["accuracy"]) def decode_fn(example): features = { "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)), "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)), } return tf.io.parse_single_example(example, features) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. data_collator = DataCollatorForLanguageModeling( tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf" ) def mask_with_collator(batch): # TF really needs an isin() function special_tokens_mask = ( ~tf.cast(batch["attention_mask"], tf.bool) | (batch["input_ids"] == tokenizer.cls_token_id) | (batch["input_ids"] == tokenizer.sep_token_id) ) batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens( batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, ) return batch batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync train_dataset = prepare_dataset( training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, ) eval_dataset = prepare_dataset( eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, ) callbacks = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer) ) model.fit( train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, ) model.save_pretrained(args.output_dir) if __name__ == "__main__": args = parse_args() main(args)
transformers/examples/tensorflow/language-modeling-tpu/run_mlm.py/0
{ "file_path": "transformers/examples/tensorflow/language-modeling-tpu/run_mlm.py", "repo_id": "transformers", "token_count": 4387 }
80
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import logging import os import sys from unittest import skip from unittest.mock import patch import tensorflow as tf from packaging.version import parse try: import tf_keras as keras except (ModuleNotFoundError, ImportError): import keras if parse(keras.__version__).major > 2: raise ValueError( "Your currently installed version of Keras is Keras 3, but this is not yet supported in " "Transformers. Please install the backwards-compatible tf-keras package with " "`pip install tf-keras`." ) from transformers.testing_utils import TestCasePlus, get_gpu_count, slow SRC_DIRS = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ "text-generation", "text-classification", "token-classification", "language-modeling", "multiple-choice", "question-answering", "summarization", "translation", "image-classification", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm import run_image_classification import run_mlm import run_ner import run_qa as run_squad import run_summarization import run_swag import run_text_classification import run_translation logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() def get_setup_file(): parser = argparse.ArgumentParser() parser.add_argument("-f") args = parser.parse_args() return args.f def get_results(output_dir): results = {} path = os.path.join(output_dir, "all_results.json") if os.path.exists(path): with open(path, "r") as f: results = json.load(f) else: raise ValueError(f"can't find {path}") return results def is_cuda_available(): return bool(tf.config.list_physical_devices("GPU")) stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class ExamplesTests(TestCasePlus): @skip("Skipping until shape inference for to_tf_dataset PR is merged.") def test_run_text_classification(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_text_classification.py --model_name_or_path distilbert/distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() if is_cuda_available(): testargs.append("--fp16") with patch.object(sys, "argv", testargs): run_text_classification.main() # Reset the mixed precision policy so we don't break other tests keras.mixed_precision.set_global_policy("float32") result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.75) def test_run_clm(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_clm.py --model_name_or_path distilbert/distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 2 
--per_device_eval_batch_size 1 --num_train_epochs 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() if len(tf.config.list_physical_devices("GPU")) > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return with patch.object(sys, "argv", testargs): run_clm.main() result = get_results(tmp_dir) self.assertLess(result["eval_perplexity"], 100) def test_run_mlm(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_mlm.py --model_name_or_path distilbert/distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --max_seq_length 64 --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --prediction_loss_only --num_train_epochs=1 --learning_rate=1e-4 """.split() with patch.object(sys, "argv", testargs): run_mlm.main() result = get_results(tmp_dir) self.assertLess(result["eval_perplexity"], 42) def test_run_ner(self): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu epochs = 7 if get_gpu_count() > 1 else 2 tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_ner.py --model_name_or_path google-bert/bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(sys, "argv", testargs): run_ner.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["accuracy"], 0.75) def test_run_squad(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_qa.py --model_name_or_path google-bert/bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --max_steps=10 --warmup_steps=2 --do_train --do_eval --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(sys, "argv", testargs): run_squad.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["f1"], 30) self.assertGreaterEqual(result["exact"], 30) def test_run_swag(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_swag.py --model_name_or_path google-bert/bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --overwrite_output_dir --max_steps=20 --warmup_steps=2 --do_train --do_eval --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(sys, "argv", testargs): run_swag.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["val_accuracy"], 0.8) @slow def test_run_summarization(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_summarization.py --model_name_or_path google-t5/t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --max_steps=50 --warmup_steps=8 --do_train --do_eval --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(sys, "argv", testargs): run_summarization.main() result = get_results(tmp_dir) 
self.assertGreaterEqual(result["rouge1"], 10) self.assertGreaterEqual(result["rouge2"], 2) self.assertGreaterEqual(result["rougeL"], 7) self.assertGreaterEqual(result["rougeLsum"], 7) @slow def test_run_translation(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_translation.py --model_name_or_path Rocketknight1/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --overwrite_output_dir --warmup_steps=8 --do_train --do_eval --learning_rate=3e-3 --num_train_epochs 12 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO """.split() with patch.object(sys, "argv", testargs): run_translation.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["bleu"], 30) def test_run_image_classification(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_image_classification.py --dataset_name hf-internal-testing/cats_vs_dogs_sample --trust_remote_code --model_name_or_path microsoft/resnet-18 --do_train --do_eval --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --output_dir {tmp_dir} --overwrite_output_dir --dataloader_num_workers 16 --num_train_epochs 2 --train_val_split 0.1 --seed 42 --ignore_mismatched_sizes True """.split() with patch.object(sys, "argv", testargs): run_image_classification.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["accuracy"], 0.7)
transformers/examples/tensorflow/test_tensorflow_examples.py/0
{ "file_path": "transformers/examples/tensorflow/test_tensorflow_examples.py", "repo_id": "transformers", "token_count": 5486 }
81
#!/usr/bin/env python

#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in the `barrier` calls, you have network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#     --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#     --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#     torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed communication
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test that cuda is available and that gpu memory can be allocated
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
transformers/scripts/distributed/torch-distributed-gpu-test.py/0
{ "file_path": "transformers/scripts/distributed/torch-distributed-gpu-test.py", "repo_id": "transformers", "token_count": 1240 }
82
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

Set up transformers following the instructions in README.md (forking first is recommended).

```bash
git clone git@github.com:huggingface/transformers.git
cd transformers
pip install -e .
pip install pandas GitPython wget
```

Get the required metadata:

```bash
curl https://cdn-datasets.huggingface.co/language_codes/language-codes-3b2.csv > language-codes-3b2.csv
curl https://cdn-datasets.huggingface.co/language_codes/iso-639-3.csv > iso-639-3.csv
```

Install the Tatoeba-Challenge repo inside transformers:

```bash
git clone git@github.com:Helsinki-NLP/Tatoeba-Challenge.git
```

To convert a few models, call the conversion script from the command line:

```bash
python src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py --models heb-eng eng-heb --save_dir converted
```

To convert many models, pass your list of Tatoeba model names to `resolver.convert_models` in a Python client or script:

```python
from transformers.convert_marian_tatoeba_to_pytorch import TatoebaConverter

resolver = TatoebaConverter(save_dir='converted')
resolver.convert_models(['heb-eng', 'eng-heb'])
```

### Upload converted models

Since version v3.5.0, the model sharing workflow has switched to a git-based system. Refer to the [model sharing doc](https://huggingface.co/transformers/main/model_sharing.html#model-sharing-and-uploading) for more details.

To upload all converted models:

1. Install [git-lfs](https://git-lfs.github.com/).

2. Log in to `huggingface-cli`:

```bash
huggingface-cli login
```

3. Run the `upload_models` script:

```bash
./scripts/tatoeba/upload_models.sh
```

### Modifications

- To change the naming logic, change the code near `os.rename`. The model card creation code may also need to change.
- To change the model card content, modify `TatoebaCodeResolver.write_model_card`.
transformers/scripts/tatoeba/README.md/0
{ "file_path": "transformers/scripts/tatoeba/README.md", "repo_id": "transformers", "token_count": 761 }
83
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast import builtins import difflib from collections.abc import Mapping from importlib import import_module from typing import Any, Callable, Dict, List, Optional import numpy as np from ..utils import is_pandas_available if is_pandas_available(): import pandas as pd class InterpreterError(ValueError): """ An error raised when the interpretor cannot evaluate a Python expression, due to syntax error or unsupported operations. """ pass ERRORS = { name: getattr(builtins, name) for name in dir(builtins) if isinstance(getattr(builtins, name), type) and issubclass(getattr(builtins, name), BaseException) } LIST_SAFE_MODULES = [ "random", "collections", "math", "time", "queue", "itertools", "re", "stat", "statistics", "unicodedata", ] PRINT_OUTPUTS, MAX_LEN_OUTPUT = "", 50000 OPERATIONS_COUNT, MAX_OPERATIONS = 0, 10000000 class BreakException(Exception): pass class ContinueException(Exception): pass class ReturnException(Exception): def __init__(self, value): self.value = value def get_iterable(obj): if isinstance(obj, list): return obj elif hasattr(obj, "__iter__"): return list(obj) else: raise InterpreterError("Object is not iterable") def evaluate_unaryop(expression, state, static_tools, custom_tools): operand = evaluate_ast(expression.operand, state, static_tools, custom_tools) if isinstance(expression.op, ast.USub): return -operand elif isinstance(expression.op, ast.UAdd): return operand elif isinstance(expression.op, ast.Not): return not operand elif isinstance(expression.op, ast.Invert): return ~operand else: raise InterpreterError(f"Unary operation {expression.op.__class__.__name__} is not supported.") def evaluate_lambda(lambda_expression, state, static_tools, custom_tools): args = [arg.arg for arg in lambda_expression.args.args] def lambda_func(*values): new_state = state.copy() for arg, value in zip(args, values): new_state[arg] = value return evaluate_ast(lambda_expression.body, new_state, static_tools, custom_tools) return lambda_func def evaluate_while(while_loop, state, static_tools, custom_tools): max_iterations = 1000 iterations = 0 while evaluate_ast(while_loop.test, state, static_tools, custom_tools): for node in while_loop.body: try: evaluate_ast(node, state, static_tools, custom_tools) except BreakException: return None except ContinueException: break iterations += 1 if iterations > max_iterations: raise InterpreterError(f"Maximum number of {max_iterations} iterations in While loop exceeded") return None def create_function(func_def, state, static_tools, custom_tools): def new_func(*args, **kwargs): func_state = state.copy() arg_names = [arg.arg for arg in func_def.args.args] default_values = [evaluate_ast(d, state, static_tools, custom_tools) for d in func_def.args.defaults] # Apply default values defaults = dict(zip(arg_names[-len(default_values) :], default_values)) # Set positional arguments for name, value in zip(arg_names, args): 
func_state[name] = value # # Set keyword arguments for name, value in kwargs.items(): func_state[name] = value # Handle variable arguments if func_def.args.vararg: vararg_name = func_def.args.vararg.arg func_state[vararg_name] = args if func_def.args.kwarg: kwarg_name = func_def.args.kwarg.arg func_state[kwarg_name] = kwargs # Set default values for arguments that were not provided for name, value in defaults.items(): if name not in func_state: func_state[name] = value # Update function state with self and __class__ if func_def.args.args and func_def.args.args[0].arg == "self": if args: func_state["self"] = args[0] func_state["__class__"] = args[0].__class__ result = None try: for stmt in func_def.body: result = evaluate_ast(stmt, func_state, static_tools, custom_tools) except ReturnException as e: result = e.value return result return new_func def create_class(class_name, class_bases, class_body): class_dict = {} for key, value in class_body.items(): class_dict[key] = value return type(class_name, tuple(class_bases), class_dict) def evaluate_function_def(func_def, state, static_tools, custom_tools): custom_tools[func_def.name] = create_function(func_def, state, static_tools, custom_tools) return custom_tools[func_def.name] def evaluate_class_def(class_def, state, static_tools, custom_tools): class_name = class_def.name bases = [evaluate_ast(base, state, static_tools, custom_tools) for base in class_def.bases] class_dict = {} for stmt in class_def.body: if isinstance(stmt, ast.FunctionDef): class_dict[stmt.name] = evaluate_function_def(stmt, state, static_tools, custom_tools) elif isinstance(stmt, ast.Assign): for target in stmt.targets: if isinstance(target, ast.Name): class_dict[target.id] = evaluate_ast(stmt.value, state, static_tools, custom_tools) elif isinstance(target, ast.Attribute): class_dict[target.attr] = evaluate_ast(stmt.value, state, static_tools, custom_tools) else: raise InterpreterError(f"Unsupported statement in class body: {stmt.__class__.__name__}") new_class = type(class_name, tuple(bases), class_dict) state[class_name] = new_class return new_class def evaluate_augassign(expression, state, static_tools, custom_tools): # Helper function to get current value and set new value based on the target type def get_current_value(target): if isinstance(target, ast.Name): return state.get(target.id, 0) elif isinstance(target, ast.Subscript): obj = evaluate_ast(target.value, state, static_tools, custom_tools) key = evaluate_ast(target.slice, state, static_tools, custom_tools) return obj[key] elif isinstance(target, ast.Attribute): obj = evaluate_ast(target.value, state, static_tools, custom_tools) return getattr(obj, target.attr) elif isinstance(target, ast.Tuple): return tuple(get_current_value(elt) for elt in target.elts) elif isinstance(target, ast.List): return [get_current_value(elt) for elt in target.elts] else: raise InterpreterError("AugAssign not supported for {type(target)} targets.") current_value = get_current_value(expression.target) value_to_add = evaluate_ast(expression.value, state, static_tools, custom_tools) # Determine the operation and apply it if isinstance(expression.op, ast.Add): if isinstance(current_value, list): if not isinstance(value_to_add, list): raise InterpreterError(f"Cannot add non-list value {value_to_add} to a list.") updated_value = current_value + value_to_add else: updated_value = current_value + value_to_add elif isinstance(expression.op, ast.Sub): updated_value = current_value - value_to_add elif isinstance(expression.op, ast.Mult): 
updated_value = current_value * value_to_add elif isinstance(expression.op, ast.Div): updated_value = current_value / value_to_add elif isinstance(expression.op, ast.Mod): updated_value = current_value % value_to_add elif isinstance(expression.op, ast.Pow): updated_value = current_value**value_to_add elif isinstance(expression.op, ast.FloorDiv): updated_value = current_value // value_to_add elif isinstance(expression.op, ast.BitAnd): updated_value = current_value & value_to_add elif isinstance(expression.op, ast.BitOr): updated_value = current_value | value_to_add elif isinstance(expression.op, ast.BitXor): updated_value = current_value ^ value_to_add elif isinstance(expression.op, ast.LShift): updated_value = current_value << value_to_add elif isinstance(expression.op, ast.RShift): updated_value = current_value >> value_to_add else: raise InterpreterError(f"Operation {type(expression.op).__name__} is not supported.") # Update the state set_value(expression.target, updated_value, state, static_tools, custom_tools) return updated_value def evaluate_boolop(node, state, static_tools, custom_tools): if isinstance(node.op, ast.And): for value in node.values: if not evaluate_ast(value, state, static_tools, custom_tools): return False return True elif isinstance(node.op, ast.Or): for value in node.values: if evaluate_ast(value, state, static_tools, custom_tools): return True return False def evaluate_binop(binop, state, static_tools, custom_tools): # Recursively evaluate the left and right operands left_val = evaluate_ast(binop.left, state, static_tools, custom_tools) right_val = evaluate_ast(binop.right, state, static_tools, custom_tools) # Determine the operation based on the type of the operator in the BinOp if isinstance(binop.op, ast.Add): return left_val + right_val elif isinstance(binop.op, ast.Sub): return left_val - right_val elif isinstance(binop.op, ast.Mult): return left_val * right_val elif isinstance(binop.op, ast.Div): return left_val / right_val elif isinstance(binop.op, ast.Mod): return left_val % right_val elif isinstance(binop.op, ast.Pow): return left_val**right_val elif isinstance(binop.op, ast.FloorDiv): return left_val // right_val elif isinstance(binop.op, ast.BitAnd): return left_val & right_val elif isinstance(binop.op, ast.BitOr): return left_val | right_val elif isinstance(binop.op, ast.BitXor): return left_val ^ right_val elif isinstance(binop.op, ast.LShift): return left_val << right_val elif isinstance(binop.op, ast.RShift): return left_val >> right_val else: raise NotImplementedError(f"Binary operation {type(binop.op).__name__} is not implemented.") def evaluate_assign(assign, state, static_tools, custom_tools): result = evaluate_ast(assign.value, state, static_tools, custom_tools) if len(assign.targets) == 1: target = assign.targets[0] set_value(target, result, state, static_tools, custom_tools) else: if len(assign.targets) != len(result): raise InterpreterError(f"Assign failed: expected {len(result)} values but got {len(assign.targets)}.") expanded_values = [] for tgt in assign.targets: if isinstance(tgt, ast.Starred): expanded_values.extend(result) else: expanded_values.append(result) for tgt, val in zip(assign.targets, expanded_values): set_value(tgt, val, state, static_tools, custom_tools) return result def set_value(target, value, state, static_tools, custom_tools): if isinstance(target, ast.Name): if target.id in static_tools: raise InterpreterError(f"Cannot assign to name '{target.id}': doing this would erase the existing tool!") state[target.id] = value 
elif isinstance(target, ast.Tuple): if not isinstance(value, tuple): if hasattr(value, "__iter__") and not isinstance(value, (str, bytes)): value = tuple(value) else: raise InterpreterError("Cannot unpack non-tuple value") if len(target.elts) != len(value): raise InterpreterError("Cannot unpack tuple of wrong size") for i, elem in enumerate(target.elts): set_value(elem, value[i], state, static_tools, custom_tools) elif isinstance(target, ast.Subscript): obj = evaluate_ast(target.value, state, static_tools, custom_tools) key = evaluate_ast(target.slice, state, static_tools, custom_tools) obj[key] = value elif isinstance(target, ast.Attribute): obj = evaluate_ast(target.value, state, static_tools, custom_tools) setattr(obj, target.attr, value) def evaluate_call(call, state, static_tools, custom_tools): if not (isinstance(call.func, ast.Attribute) or isinstance(call.func, ast.Name)): raise InterpreterError(f"This is not a correct function: {call.func}).") if isinstance(call.func, ast.Attribute): obj = evaluate_ast(call.func.value, state, static_tools, custom_tools) func_name = call.func.attr if not hasattr(obj, func_name): raise InterpreterError(f"Object {obj} has no attribute {func_name}") func = getattr(obj, func_name) elif isinstance(call.func, ast.Name): func_name = call.func.id if func_name in state: func = state[func_name] elif func_name in static_tools: func = static_tools[func_name] elif func_name in custom_tools: func = custom_tools[func_name] elif func_name in ERRORS: func = ERRORS[func_name] else: raise InterpreterError( f"It is not permitted to evaluate other functions than the provided tools or functions defined in previous code (tried to execute {call.func.id})." ) args = [] for arg in call.args: if isinstance(arg, ast.Starred): args.extend(evaluate_ast(arg.value, state, static_tools, custom_tools)) else: args.append(evaluate_ast(arg, state, static_tools, custom_tools)) args = [] for arg in call.args: if isinstance(arg, ast.Starred): unpacked = evaluate_ast(arg.value, state, static_tools, custom_tools) if not hasattr(unpacked, "__iter__") or isinstance(unpacked, (str, bytes)): raise InterpreterError(f"Cannot unpack non-iterable value {unpacked}") args.extend(unpacked) else: args.append(evaluate_ast(arg, state, static_tools, custom_tools)) kwargs = {keyword.arg: evaluate_ast(keyword.value, state, static_tools, custom_tools) for keyword in call.keywords} if isinstance(func, type) and len(func.__module__.split(".")) > 1: # Check for user-defined classes # Instantiate the class using its constructor obj = func.__new__(func) # Create a new instance of the class if hasattr(obj, "__init__"): # Check if the class has an __init__ method obj.__init__(*args, **kwargs) # Call the __init__ method correctly return obj else: if func_name == "super": if not args: if "__class__" in state and "self" in state: return super(state["__class__"], state["self"]) else: raise InterpreterError("super() needs at least one argument") cls = args[0] if not isinstance(cls, type): raise InterpreterError("super() argument 1 must be type") if len(args) == 1: return super(cls) elif len(args) == 2: instance = args[1] return super(cls, instance) else: raise InterpreterError("super() takes at most 2 arguments") else: if func_name == "print": output = " ".join(map(str, args)) global PRINT_OUTPUTS PRINT_OUTPUTS += output + "\n" # cap the number of lines return None else: # Assume it's a callable object output = func(*args, **kwargs) return output def evaluate_subscript(subscript, state, static_tools, custom_tools): 
index = evaluate_ast(subscript.slice, state, static_tools, custom_tools) value = evaluate_ast(subscript.value, state, static_tools, custom_tools) if isinstance(value, str) and isinstance(index, str): raise InterpreterError("You're trying to subscript a string with a string index, which is impossible") if isinstance(value, pd.core.indexing._LocIndexer): parent_object = value.obj return parent_object.loc[index] if isinstance(value, (pd.DataFrame, pd.Series, np.ndarray)): return value[index] elif isinstance(value, pd.core.groupby.generic.DataFrameGroupBy): return value[index] elif isinstance(index, slice): return value[index] elif isinstance(value, (list, tuple)): if not (-len(value) <= index < len(value)): raise InterpreterError(f"Index {index} out of bounds for list of length {len(value)}") return value[int(index)] elif isinstance(value, str): if not (-len(value) <= index < len(value)): raise InterpreterError(f"Index {index} out of bounds for string of length {len(value)}") return value[index] elif index in value: return value[index] elif isinstance(index, str) and isinstance(value, Mapping): close_matches = difflib.get_close_matches(index, list(value.keys())) if len(close_matches) > 0: return value[close_matches[0]] raise InterpreterError(f"Could not index {value} with '{index}'.") def evaluate_name(name, state, static_tools, custom_tools): if name.id in state: return state[name.id] elif name.id in static_tools: return static_tools[name.id] elif name.id in ERRORS: return ERRORS[name.id] close_matches = difflib.get_close_matches(name.id, list(state.keys())) if len(close_matches) > 0: return state[close_matches[0]] raise InterpreterError(f"The variable `{name.id}` is not defined.") def evaluate_condition(condition, state, static_tools, custom_tools): left = evaluate_ast(condition.left, state, static_tools, custom_tools) comparators = [evaluate_ast(c, state, static_tools, custom_tools) for c in condition.comparators] ops = [type(op) for op in condition.ops] result = True current_left = left for op, comparator in zip(ops, comparators): if op == ast.Eq: current_result = current_left == comparator elif op == ast.NotEq: current_result = current_left != comparator elif op == ast.Lt: current_result = current_left < comparator elif op == ast.LtE: current_result = current_left <= comparator elif op == ast.Gt: current_result = current_left > comparator elif op == ast.GtE: current_result = current_left >= comparator elif op == ast.Is: current_result = current_left is comparator elif op == ast.IsNot: current_result = current_left is not comparator elif op == ast.In: current_result = current_left in comparator elif op == ast.NotIn: current_result = current_left not in comparator else: raise InterpreterError(f"Operator not supported: {op}") result = result & current_result current_left = comparator if isinstance(result, bool) and not result: break return result if isinstance(result, (bool, pd.Series)) else result.all() def evaluate_if(if_statement, state, static_tools, custom_tools): result = None test_result = evaluate_ast(if_statement.test, state, static_tools, custom_tools) if test_result: for line in if_statement.body: line_result = evaluate_ast(line, state, static_tools, custom_tools) if line_result is not None: result = line_result else: for line in if_statement.orelse: line_result = evaluate_ast(line, state, static_tools, custom_tools) if line_result is not None: result = line_result return result def evaluate_for(for_loop, state, static_tools, custom_tools): result = None iterator = 
evaluate_ast(for_loop.iter, state, static_tools, custom_tools) for counter in iterator: set_value(for_loop.target, counter, state, static_tools, custom_tools) for node in for_loop.body: try: line_result = evaluate_ast(node, state, static_tools, custom_tools) if line_result is not None: result = line_result except BreakException: break except ContinueException: continue else: continue break return result def evaluate_listcomp(listcomp, state, static_tools, custom_tools): def inner_evaluate(generators, index, current_state): if index >= len(generators): return [evaluate_ast(listcomp.elt, current_state, static_tools, custom_tools)] generator = generators[index] iter_value = evaluate_ast(generator.iter, current_state, static_tools, custom_tools) result = [] for value in iter_value: new_state = current_state.copy() if isinstance(generator.target, ast.Tuple): for idx, elem in enumerate(generator.target.elts): new_state[elem.id] = value[idx] else: new_state[generator.target.id] = value if all(evaluate_ast(if_clause, new_state, static_tools, custom_tools) for if_clause in generator.ifs): result.extend(inner_evaluate(generators, index + 1, new_state)) return result return inner_evaluate(listcomp.generators, 0, state) def evaluate_try(try_node, state, static_tools, custom_tools): try: for stmt in try_node.body: evaluate_ast(stmt, state, static_tools, custom_tools) except Exception as e: matched = False for handler in try_node.handlers: if handler.type is None or isinstance(e, evaluate_ast(handler.type, state, static_tools, custom_tools)): matched = True if handler.name: state[handler.name] = e for stmt in handler.body: evaluate_ast(stmt, state, static_tools, custom_tools) break if not matched: raise e else: if try_node.orelse: for stmt in try_node.orelse: evaluate_ast(stmt, state, static_tools, custom_tools) finally: if try_node.finalbody: for stmt in try_node.finalbody: evaluate_ast(stmt, state, static_tools, custom_tools) def evaluate_raise(raise_node, state, static_tools, custom_tools): if raise_node.exc is not None: exc = evaluate_ast(raise_node.exc, state, static_tools, custom_tools) else: exc = None if raise_node.cause is not None: cause = evaluate_ast(raise_node.cause, state, static_tools, custom_tools) else: cause = None if exc is not None: if cause is not None: raise exc from cause else: raise exc else: raise InterpreterError("Re-raise is not supported without an active exception") def evaluate_assert(assert_node, state, static_tools, custom_tools): test_result = evaluate_ast(assert_node.test, state, static_tools, custom_tools) if not test_result: if assert_node.msg: msg = evaluate_ast(assert_node.msg, state, static_tools, custom_tools) raise AssertionError(msg) else: # Include the failing condition in the assertion message test_code = ast.unparse(assert_node.test) raise AssertionError(f"Assertion failed: {test_code}") def evaluate_with(with_node, state, static_tools, custom_tools): contexts = [] for item in with_node.items: context_expr = evaluate_ast(item.context_expr, state, static_tools, custom_tools) if item.optional_vars: state[item.optional_vars.id] = context_expr.__enter__() contexts.append(state[item.optional_vars.id]) else: context_var = context_expr.__enter__() contexts.append(context_var) try: for stmt in with_node.body: evaluate_ast(stmt, state, static_tools, custom_tools) except Exception as e: for context in reversed(contexts): context.__exit__(type(e), e, e.__traceback__) raise else: for context in reversed(contexts): context.__exit__(None, None, None) def 
import_modules(expression, state, authorized_imports): def check_module_authorized(module_name): module_path = module_name.split(".") module_subpaths = [".".join(module_path[:i]) for i in range(1, len(module_path) + 1)] return any(subpath in authorized_imports for subpath in module_subpaths) if isinstance(expression, ast.Import): for alias in expression.names: if check_module_authorized(alias.name): module = import_module(alias.name) state[alias.asname or alias.name] = module else: raise InterpreterError( f"Import of {alias.name} is not allowed. Authorized imports are: {str(authorized_imports)}" ) return None elif isinstance(expression, ast.ImportFrom): if check_module_authorized(expression.module): module = __import__(expression.module, fromlist=[alias.name for alias in expression.names]) for alias in expression.names: state[alias.asname or alias.name] = getattr(module, alias.name) else: raise InterpreterError(f"Import from {expression.module} is not allowed.") return None def evaluate_dictcomp(dictcomp, state, static_tools, custom_tools): result = {} for gen in dictcomp.generators: iter_value = evaluate_ast(gen.iter, state, static_tools, custom_tools) for value in iter_value: new_state = state.copy() set_value(gen.target, value, new_state, static_tools, custom_tools) if all(evaluate_ast(if_clause, new_state, static_tools, custom_tools) for if_clause in gen.ifs): key = evaluate_ast(dictcomp.key, new_state, static_tools, custom_tools) val = evaluate_ast(dictcomp.value, new_state, static_tools, custom_tools) result[key] = val return result def evaluate_ast( expression: ast.AST, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str] = LIST_SAFE_MODULES, ): """ Evaluate an abstract syntax tree using the content of the variables stored in a state and only evaluating a given set of functions. This function will recurse trough the nodes of the tree provided. Args: expression (`ast.AST`): The code to evaluate, as an abstract syntax tree. state (`Dict[str, Any]`): A dictionary mapping variable names to values. The `state` is updated if need be when the evaluation encounters assignements. static_tools (`Dict[str, Callable]`): Functions that may be called during the evaluation. Trying to change one of these static_tools will raise an error. custom_tools (`Dict[str, Callable]`): Functions that may be called during the evaluation. These static_tools can be overwritten. authorized_imports (`List[str]`): The list of modules that can be imported by the code. By default, only a few safe modules are allowed. Add more at your own risk! """ global OPERATIONS_COUNT if OPERATIONS_COUNT >= MAX_OPERATIONS: raise InterpreterError( f"Reached the max number of operations of {MAX_OPERATIONS}. Maybe there is an infinite loop somewhere in the code, or you're just asking too many calculations." ) OPERATIONS_COUNT += 1 if isinstance(expression, ast.Assign): # Assignement -> we evaluate the assignment which should update the state # We return the variable assigned as it may be used to determine the final result. 
return evaluate_assign(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.AugAssign): return evaluate_augassign(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Call): # Function call -> we return the value of the function call return evaluate_call(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Constant): # Constant -> just return the value return expression.value elif isinstance(expression, ast.Tuple): return tuple(evaluate_ast(elt, state, static_tools, custom_tools) for elt in expression.elts) elif isinstance(expression, (ast.ListComp, ast.GeneratorExp)): return evaluate_listcomp(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.UnaryOp): return evaluate_unaryop(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Starred): return evaluate_ast(expression.value, state, static_tools, custom_tools) elif isinstance(expression, ast.BoolOp): # Boolean operation -> evaluate the operation return evaluate_boolop(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Break): raise BreakException() elif isinstance(expression, ast.Continue): raise ContinueException() elif isinstance(expression, ast.BinOp): # Binary operation -> execute operation return evaluate_binop(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Compare): # Comparison -> evaluate the comparison return evaluate_condition(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Lambda): return evaluate_lambda(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.FunctionDef): return evaluate_function_def(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Dict): # Dict -> evaluate all keys and values keys = [evaluate_ast(k, state, static_tools, custom_tools) for k in expression.keys] values = [evaluate_ast(v, state, static_tools, custom_tools) for v in expression.values] return dict(zip(keys, values)) elif isinstance(expression, ast.Expr): # Expression -> evaluate the content return evaluate_ast(expression.value, state, static_tools, custom_tools) elif isinstance(expression, ast.For): # For loop -> execute the loop return evaluate_for(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.FormattedValue): # Formatted value (part of f-string) -> evaluate the content and return return evaluate_ast(expression.value, state, static_tools, custom_tools) elif isinstance(expression, ast.If): # If -> execute the right branch return evaluate_if(expression, state, static_tools, custom_tools) elif hasattr(ast, "Index") and isinstance(expression, ast.Index): return evaluate_ast(expression.value, state, static_tools, custom_tools) elif isinstance(expression, ast.JoinedStr): return "".join([str(evaluate_ast(v, state, static_tools, custom_tools)) for v in expression.values]) elif isinstance(expression, ast.List): # List -> evaluate all elements return [evaluate_ast(elt, state, static_tools, custom_tools) for elt in expression.elts] elif isinstance(expression, ast.Name): # Name -> pick up the value in the state return evaluate_name(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Subscript): # Subscript -> return the value of the indexing return evaluate_subscript(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.IfExp): test_val = evaluate_ast(expression.test, state, static_tools, custom_tools) if 
test_val: return evaluate_ast(expression.body, state, static_tools, custom_tools) else: return evaluate_ast(expression.orelse, state, static_tools, custom_tools) elif isinstance(expression, ast.Attribute): value = evaluate_ast(expression.value, state, static_tools, custom_tools) return getattr(value, expression.attr) elif isinstance(expression, ast.Slice): return slice( evaluate_ast(expression.lower, state, static_tools, custom_tools) if expression.lower is not None else None, evaluate_ast(expression.upper, state, static_tools, custom_tools) if expression.upper is not None else None, evaluate_ast(expression.step, state, static_tools, custom_tools) if expression.step is not None else None, ) elif isinstance(expression, ast.DictComp): return evaluate_dictcomp(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.While): return evaluate_while(expression, state, static_tools, custom_tools) elif isinstance(expression, (ast.Import, ast.ImportFrom)): return import_modules(expression, state, authorized_imports) elif isinstance(expression, ast.ClassDef): return evaluate_class_def(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Try): return evaluate_try(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Raise): return evaluate_raise(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Assert): return evaluate_assert(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.With): return evaluate_with(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Set): return {evaluate_ast(elt, state, static_tools, custom_tools) for elt in expression.elts} elif isinstance(expression, ast.Return): raise ReturnException( evaluate_ast(expression.value, state, static_tools, custom_tools) if expression.value else None ) else: # For now we refuse anything else. Let's add things as we need them. raise InterpreterError(f"{expression.__class__.__name__} is not supported.") def truncate_print_outputs(print_outputs: str, max_len_outputs: int = MAX_LEN_OUTPUT) -> str: if len(print_outputs) < max_len_outputs: return print_outputs else: return f"Print outputs:\n{print_outputs[:max_len_outputs]}\n_Print outputs have been truncated over the limit of {max_len_outputs} characters._\n" def evaluate_python_code( code: str, static_tools: Optional[Dict[str, Callable]] = None, custom_tools: Optional[Dict[str, Callable]] = None, state: Optional[Dict[str, Any]] = None, authorized_imports: List[str] = LIST_SAFE_MODULES, ): """ Evaluate a python expression using the content of the variables stored in a state and only evaluating a given set of functions. This function will recurse through the nodes of the tree provided. Args: code (`str`): The code to evaluate. static_tools (`Dict[str, Callable]`): The functions that may be called during the evaluation. These tools cannot be overwritten in the code: any assignment to their name will raise an error. custom_tools (`Dict[str, Callable]`): The functions that may be called during the evaluation. These tools can be overwritten in the code: any assignment to their name will overwrite them. state (`Dict[str, Any]`): A dictionary mapping variable names to values. The `state` should contain the initial inputs but will be updated by this function to contain all variables as they are evaluated. The print outputs will be stored in the state under the key 'print_outputs'. 
""" try: expression = ast.parse(code) except SyntaxError as e: raise SyntaxError(f"The code generated by the agent is not valid.\n{e}") if state is None: state = {} if static_tools is None: static_tools = {} if custom_tools is None: custom_tools = {} result = None global PRINT_OUTPUTS PRINT_OUTPUTS = "" global OPERATIONS_COUNT OPERATIONS_COUNT = 0 try: for node in expression.body: result = evaluate_ast(node, state, static_tools, custom_tools, authorized_imports) state["print_outputs"] = truncate_print_outputs(PRINT_OUTPUTS, max_len_outputs=MAX_LEN_OUTPUT) return result except InterpreterError as e: msg = truncate_print_outputs(PRINT_OUTPUTS, max_len_outputs=MAX_LEN_OUTPUT) msg += f"EXECUTION FAILED:\nEvaluation stopped at line '{ast.get_source_segment(code, node)}' because of the following error:\n{e}" raise InterpreterError(msg)
transformers/src/transformers/agents/python_interpreter.py/0
{ "file_path": "transformers/src/transformers/agents/python_interpreter.py", "repo_id": "transformers", "token_count": 15397 }
84
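A usage sketch for the interpreter record above. This is illustrative only: it assumes the module path `transformers.agents.python_interpreter` taken from the record's file path, and it relies on the assignment/call/expression branches of `evaluate_ast` that are not reproduced in this excerpt; the `add` tool is a made-up placeholder.

from transformers.agents.python_interpreter import evaluate_python_code

state = {}
result = evaluate_python_code(
    "x = add(2, 3)\nx + 1",
    static_tools={"add": lambda a, b: a + b},  # cannot be shadowed by the evaluated code
    custom_tools={},                           # may be overwritten by the evaluated code
    state=state,
)
# Expected under the assumptions above: result == 6, state["x"] == 5,
# and state["print_outputs"] holds whatever the snippet printed (empty here).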
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run _serve_dependencies_installed = True except (ImportError, AttributeError): BaseModel = object def Body(*x, **y): pass _serve_dependencies_installed = False logger = logging.get_logger("transformers-cli/serving") def serve_command_factory(args: Namespace): """ Factory function used to instantiate serving server from provided command line arguments. Returns: ServeCommand """ nlp = pipeline( task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, ) return ServeCommand(nlp, args.host, args.port, args.workers) class ServeModelInfoResult(BaseModel): """ Expose model information """ infos: dict class ServeTokenizeResult(BaseModel): """ Tokenize result model """ tokens: List[str] tokens_ids: Optional[List[int]] class ServeDeTokenizeResult(BaseModel): """ DeTokenize result model """ text: str class ServeForwardResult(BaseModel): """ Forward result model """ output: Any class ServeCommand(BaseTransformersCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): """ Register this command to argparse so it's available for the transformer-cli Args: parser: Root parser to register command-specific arguments """ serve_parser = parser.add_parser( "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints." ) serve_parser.add_argument( "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on", ) serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.") serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.") serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers") serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.") serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.") serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.") serve_parser.add_argument( "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", ) serve_parser.set_defaults(func=serve_command_factory) def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int): self._pipeline = pipeline self.host = host self.port = port self.workers = workers if not _serve_dependencies_installed: raise RuntimeError( "Using serve command requires FastAPI and uvicorn. 
" 'Please install transformers with [serving]: pip install "transformers[serving]". ' "Or install FastAPI and uvicorn separately." ) else: logger.info(f"Serving model over {host}:{port}") self._app = FastAPI( routes=[ APIRoute( "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"], ), APIRoute( "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"], ), APIRoute( "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"], ), APIRoute( "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"], ), ], timeout=600, ) def run(self): run(self._app, host=self.host, port=self.port, workers=self.workers) def model_info(self): return ServeModelInfoResult(infos=vars(self._pipeline.model.config)) def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)): """ Tokenize the provided input and eventually returns corresponding tokens id: - **text_input**: String to tokenize - **return_ids**: Boolean flags indicating if the tokens have to be converted to their integer mapping. """ try: tokens_txt = self._pipeline.tokenizer.tokenize(text_input) if return_ids: tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt) return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids) else: return ServeTokenizeResult(tokens=tokens_txt) except Exception as e: raise HTTPException(status_code=500, detail={"model": "", "error": str(e)}) def detokenize( self, tokens_ids: List[int] = Body(None, embed=True), skip_special_tokens: bool = Body(False, embed=True), cleanup_tokenization_spaces: bool = Body(True, embed=True), ): """ Detokenize the provided tokens ids to readable text: - **tokens_ids**: List of tokens ids - **skip_special_tokens**: Flag indicating to not try to decode special tokens - **cleanup_tokenization_spaces**: Flag indicating to remove all leading/trailing spaces and intermediate ones. """ try: decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces) return ServeDeTokenizeResult(model="", text=decoded_str) except Exception as e: raise HTTPException(status_code=500, detail={"model": "", "error": str(e)}) async def forward(self, inputs=Body(None, embed=True)): """ **inputs**: **attention_mask**: **tokens_type_ids**: """ # Check we don't have empty string if len(inputs) == 0: return ServeForwardResult(output=[], attention=[]) try: # Forward through the model output = self._pipeline(inputs) return ServeForwardResult(output=output) except Exception as e: raise HTTPException(500, {"error": str(e)})
transformers/src/transformers/commands/serving.py/0
{ "file_path": "transformers/src/transformers/commands/serving.py", "repo_id": "transformers", "token_count": 3477 }
85
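A minimal way to exercise the serving command above from Python rather than the CLI, assuming the module path `transformers.commands.serving` from the record and that the optional `fastapi`/`uvicorn` dependencies are installed; the checkpoint name is only an example.

from transformers import pipeline
from transformers.commands.serving import ServeCommand

# Build any pipeline-compatible task/model, then serve it on localhost:8888 with one worker.
nlp = pipeline(task="sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
server = ServeCommand(nlp, host="localhost", port=8888, workers=1)
server.run()  # exposes /, /tokenize, /detokenize and /forward via uvicorn, as registered above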
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import f1_score, matthews_corrcoef DEPRECATION_WARNING = ( "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate " "library. You can have a look at this example script for pointers: " "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" ) def simple_accuracy(preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(simple_accuracy, "sklearn") return (preds == labels).mean() def acc_and_f1(preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(acc_and_f1, "sklearn") acc = simple_accuracy(preds, labels) f1 = f1_score(y_true=labels, y_pred=preds) return { "acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2, } def pearson_and_spearman(preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(pearson_and_spearman, "sklearn") pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def glue_compute_metrics(task_name, preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(glue_compute_metrics, "sklearn") assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}" if task_name == "cola": return {"mcc": matthews_corrcoef(labels, preds)} elif task_name == "sst-2": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mrpc": return acc_and_f1(preds, labels) elif task_name == "sts-b": return pearson_and_spearman(preds, labels) elif task_name == "qqp": return acc_and_f1(preds, labels) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(preds, labels)} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(preds, labels)} elif task_name == "qnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "rte": return {"acc": simple_accuracy(preds, labels)} elif task_name == "wnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "hans": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name) def xnli_compute_metrics(task_name, preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(xnli_compute_metrics, "sklearn") if len(preds) != len(labels): raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}") if task_name == "xnli": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name)
transformers/src/transformers/data/metrics/__init__.py/0
{ "file_path": "transformers/src/transformers/data/metrics/__init__.py", "repo_id": "transformers", "token_count": 1413 }
86
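A small worked example of the (deprecated) metric helpers above, assuming `scikit-learn` and `scipy` are installed; the import path follows the record's file path.

import numpy as np
from transformers.data.metrics import glue_compute_metrics

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
# MRPC reports accuracy and F1: here acc = 3/4 = 0.75, f1 = 0.8, acc_and_f1 = 0.775
print(glue_compute_metrics("mrpc", preds, labels))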
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod from collections import UserDict from typing import Dict, List, Optional, Tuple, Union import numpy as np import torch from ..utils import add_start_docstrings from .beam_constraints import Constraint, ConstraintListState PROCESS_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`): Current scores of the top `2 * num_beams` non-finished beam hypotheses. next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses. next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): Beam indices indicating to which beam hypothesis the `next_tokens` correspond. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. beam_indices (`torch.LongTensor`, *optional*): Beam indices indicating to which beam hypothesis each token correspond. group_index (`int`, *optional*): The index of the group of beams. Used with [`~PreTrainedModel.group_beam_search`]. Return: `UserDict`: A dictionary composed of the fields as defined above: - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all non-finished beams. - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added to the non-finished beam_hypotheses. - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices indicating to which beam the next tokens shall be added. """ FINALIZE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) final_beam_scores (`torch.FloatTensor` of shape `(batch_size * num_beams)`): The final scores of all non-finished beams. final_beam_tokens (`torch.FloatTensor` of shape `(batch_size * num_beams)`): The last tokens to be added to the non-finished beam_hypotheses. final_beam_indices (`torch.FloatTensor` of shape `(batch_size * num_beams)`): The beam indices indicating to which beam the `final_beam_tokens` shall be added. pad_token_id (`int`, *optional*): The id of the *padding* token. 
eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. Return: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. """ class BeamScorer(ABC): """ Abstract base class for all beam scorers that are used for [`~PreTrainedModel.beam_search`] and [`~PreTrainedModel.beam_sample`]. """ @abstractmethod @add_start_docstrings(PROCESS_INPUTS_DOCSTRING) def process( self, input_ids: torch.LongTensor, next_scores: torch.FloatTensor, next_tokens: torch.LongTensor, next_indices: torch.LongTensor, **kwargs, ) -> Tuple[torch.Tensor]: raise NotImplementedError("This is an abstract method.") @abstractmethod @add_start_docstrings(FINALIZE_INPUTS_DOCSTRING) def finalize( self, input_ids: torch.LongTensor, next_scores: torch.FloatTensor, next_tokens: torch.LongTensor, next_indices: torch.LongTensor, max_length: int, **kwargs, ) -> torch.LongTensor: raise NotImplementedError("This is an abstract method.") class BeamSearchScorer(BeamScorer): r""" [`BeamScorer`] implementing standard beam search decoding. Adapted in part from [Facebook's XLM beam search code](https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529). Reference for the diverse beam search algorithm and implementation [Ashwin Kalyan's DBS implementation](https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua) Args: batch_size (`int`): Batch size of `input_ids` for which standard beam search decoding is run in parallel. num_beams (`int`): Number of beams for beam search. device (`torch.device`): Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be allocated. length_penalty (`float`, *optional*, defaults to 1.0): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. do_early_stopping (`bool` or `str`, *optional*, defaults to `False`): Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values: `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a heuristic is applied and the generation stops when it is very unlikely to find better candidates; `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). num_beam_hyps_to_keep (`int`, *optional*, defaults to 1): The number of beam hypotheses that shall be returned upon calling [`~transformers.BeamSearchScorer.finalize`]. num_beam_groups (`int`, *optional*, defaults to 1): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. max_length (`int`, *optional*): The maximum length of the sequence to be generated.
""" def __init__( self, batch_size: int, num_beams: int, device: torch.device, length_penalty: Optional[float] = 1.0, do_early_stopping: Optional[Union[bool, str]] = False, num_beam_hyps_to_keep: Optional[int] = 1, num_beam_groups: Optional[int] = 1, max_length: Optional[int] = None, ): self.num_beams = num_beams self.device = device self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep self.num_beam_groups = num_beam_groups self.group_size = self.num_beams // self.num_beam_groups self._is_init = False # self._beam_hyps[i*self.num_beam_groups+j] is the beam_hyps of the j-th group in the i-th mini-batch. # If group_beam_search is not used, the list consists of `batch_size` beam_hyps. self._beam_hyps = [ BeamHypotheses( num_beams=self.group_size, length_penalty=self.length_penalty, early_stopping=self.do_early_stopping, max_length=max_length, ) for _ in range(batch_size * self.num_beam_groups) ] # self._done[i*self.num_beam_groups+j] indicates whether the generation of the beam_hyps of the j-th group # in the i-th mini-batch is complete. self._done = torch.tensor( [False for _ in range(batch_size * self.num_beam_groups)], dtype=torch.bool, device=self.device ) if not isinstance(num_beams, int) or num_beams <= 1: raise ValueError( f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1," " one should make use of `greedy_search` instead." ) if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0): raise ValueError( "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be" f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}." ) @property def is_done(self) -> bool: return self._done.all() def process( self, input_ids: torch.LongTensor, next_scores: torch.FloatTensor, next_tokens: torch.LongTensor, next_indices: torch.LongTensor, pad_token_id: Optional[Union[int, torch.Tensor]] = None, eos_token_id: Optional[Union[int, List[int], torch.Tensor]] = None, beam_indices: Optional[torch.LongTensor] = None, group_index: Optional[int] = 0, decoder_prompt_len: Optional[int] = 0, ) -> Dict[str, torch.Tensor]: # add up to the length which the next_scores is calculated on (including decoder prompt) cur_len = input_ids.shape[-1] + 1 batch_size = len(self._beam_hyps) // self.num_beam_groups if not (batch_size == (input_ids.shape[0] // self.group_size)): if self.num_beam_groups > 1: raise ValueError( f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam " f"size of {self.group_size} is expected by the beam scorer." ) else: raise ValueError( f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of " f"{self.group_size} is expected by the beam scorer." 
) device = input_ids.device next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device) next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device) next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device) if eos_token_id is not None and not isinstance(eos_token_id, torch.Tensor): if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] eos_token_id = torch.tensor(eos_token_id) for batch_idx in range(batch_size): batch_group_idx = batch_idx * self.num_beam_groups + group_index if self._done[batch_group_idx]: if self.num_beams < len(self._beam_hyps[batch_group_idx]): raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated") if eos_token_id is None or pad_token_id is None: raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined") # pad the batch next_beam_scores[batch_idx, :] = 0 next_beam_tokens[batch_idx, :] = pad_token_id next_beam_indices[batch_idx, :] = 0 continue # next tokens for this sentence beam_idx = 0 for beam_token_rank, (next_token, next_score, next_index) in enumerate( zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx]) ): batch_beam_idx = batch_idx * self.group_size + next_index # add to generated hypotheses if end of sentence if (eos_token_id is not None) and (next_token.item() in eos_token_id): # if beam_token does not belong to top num_beams tokens, it should not be added is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size if is_beam_token_worse_than_top_num_beams: continue if beam_indices is not None: beam_index = beam_indices[batch_beam_idx] beam_index = beam_index + (batch_beam_idx,) else: beam_index = None self._beam_hyps[batch_group_idx].add( input_ids[batch_beam_idx].clone(), next_score.item(), beam_indices=beam_index, generated_len=cur_len - decoder_prompt_len, ) else: # add next predicted token since it is not eos_token next_beam_scores[batch_idx, beam_idx] = next_score next_beam_tokens[batch_idx, beam_idx] = next_token next_beam_indices[batch_idx, beam_idx] = batch_beam_idx beam_idx += 1 # once the beam for next step is full, don't add more tokens to it. if beam_idx == self.group_size: break if beam_idx < self.group_size: raise ValueError( f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:" f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected." 
) # Check if we are done so that we can save a pad step if all(done) self._done[batch_group_idx] = self._done[batch_group_idx] or self._beam_hyps[batch_group_idx].is_done( next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len ) return UserDict( { "next_beam_scores": next_beam_scores.view(-1), "next_beam_tokens": next_beam_tokens.view(-1), "next_beam_indices": next_beam_indices.view(-1), } ) def finalize( self, input_ids: torch.LongTensor, final_beam_scores: torch.FloatTensor, final_beam_tokens: torch.LongTensor, final_beam_indices: torch.LongTensor, max_length: int, pad_token_id: Optional[Union[int, torch.Tensor]] = None, eos_token_id: Optional[Union[int, List[int], torch.Tensor]] = None, beam_indices: Optional[torch.LongTensor] = None, decoder_prompt_len: Optional[int] = 0, ) -> Tuple[torch.LongTensor]: batch_size = len(self._beam_hyps) // self.num_beam_groups if eos_token_id is not None and not isinstance(eos_token_id, torch.Tensor): if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] eos_token_id = torch.tensor(eos_token_id) # finalize all open beam hypotheses and add to generated hypotheses for batch_group_idx, beam_hyp in enumerate(self._beam_hyps): if self._done[batch_group_idx]: continue # all open beam hypotheses are added to the beam hypothesis # beam hypothesis class automatically keeps the best beams for index_per_group in range(self.group_size): batch_beam_idx = batch_group_idx * self.group_size + index_per_group final_score = final_beam_scores[batch_beam_idx].item() final_tokens = input_ids[batch_beam_idx] beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None generated_len = final_tokens.shape[-1] - decoder_prompt_len beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len) # select the best hypotheses sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep) best = [] best_indices = [] best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32) # retrieve best hypotheses for i in range(batch_size): beam_hyps_in_batch = self._beam_hyps[i * self.num_beam_groups : (i + 1) * self.num_beam_groups] candidate_beams = [beam for beam_hyp in beam_hyps_in_batch for beam in beam_hyp.beams] sorted_hyps = sorted(candidate_beams, key=lambda x: x[0]) for j in range(self.num_beam_hyps_to_keep): best_hyp_tuple = sorted_hyps.pop() best_score = best_hyp_tuple[0] best_hyp = best_hyp_tuple[1] best_index = best_hyp_tuple[2] sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp) # append hyp to lists best.append(best_hyp) # append indices to list best_indices.append(best_index) best_scores[i * self.num_beam_hyps_to_keep + j] = best_score # prepare for adding eos sent_lengths_max = sent_lengths.max().item() + 1 sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) if len(best_indices) > 0 and best_indices[0] is not None: indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) else: indices = None # shorter batches are padded if needed if sent_lengths.min().item() != sent_lengths.max().item(): if pad_token_id is None: raise ValueError("`pad_token_id` has to be defined") decoded.fill_(pad_token_id) if indices is not None: indices.fill_(-1) # fill with hypotheses and eos_token_id if the latter fits in for i, (hypo, best_idx) in enumerate(zip(best, best_indices)): 
decoded[i, : sent_lengths[i]] = hypo if indices is not None: indices[i, : len(best_idx)] = torch.tensor(best_idx) if sent_lengths[i] < sent_max_len: # inserting only the first eos_token_id decoded[i, sent_lengths[i]] = eos_token_id[0] return UserDict( { "sequences": decoded, "sequence_scores": best_scores, "beam_indices": indices, } ) class ConstrainedBeamSearchScorer(BeamScorer): r""" [`BeamScorer`] implementing constrained beam search decoding. Args: batch_size (`int`): Batch Size of `input_ids` for which standard beam search decoding is run in parallel. num_beams (`int`): Number of beams for beam search. constraints (`List[Constraint]`): A list of positive constraints represented as `Constraint` objects that must be fulfilled in the generation output. For more information, the documentation of [`Constraint`] should be read. device (`torch.device`): Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be allocated. length_penalty (`float`, *optional*, defaults to 1.0): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. do_early_stopping (`bool` or `str`, *optional*, defaults to `False`): Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values: `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an heuristic is applied and the generation stops when is it very unlikely to find better candidates; `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). num_beam_hyps_to_keep (`int`, *optional*, defaults to 1): The number of beam hypotheses that shall be returned upon calling [`~transformers.BeamSearchScorer.finalize`]. num_beam_groups (`int`, *optional*, defaults to 1): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. max_length (`int`, *optional*): The maximum length of the sequence to be generated. """ def __init__( self, batch_size: int, num_beams: int, constraints: List[Constraint], device: torch.device, length_penalty: Optional[float] = 1.0, do_early_stopping: Optional[Union[bool, str]] = False, num_beam_hyps_to_keep: Optional[int] = 1, num_beam_groups: Optional[int] = 1, max_length: Optional[int] = None, ): self.num_beams = num_beams self.device = device self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep self.num_beam_groups = num_beam_groups self.group_size = self.num_beams // self.num_beam_groups self.constraints = constraints self._is_init = False self._beam_hyps = [ BeamHypotheses( num_beams=self.num_beams, length_penalty=self.length_penalty, early_stopping=self.do_early_stopping, max_length=max_length, ) for _ in range(batch_size) ] self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device) if not isinstance(num_beams, int) or num_beams <= 1: raise ValueError( f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. 
For `num_beams` == 1," " one should make use of `greedy_search` instead." ) if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0): raise ValueError( "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be" f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}." ) @property def is_done(self) -> bool: return self._done.all() def make_constraint_states(self, n): return [ConstraintListState([constraint.copy() for constraint in self.constraints]) for _ in range(n)] def check_completes_constraints(self, sequence): new_state = self.make_constraint_states(1)[0] new_state.reset(sequence) return new_state.completed def process( self, input_ids: torch.LongTensor, next_scores: torch.FloatTensor, next_tokens: torch.LongTensor, next_indices: torch.LongTensor, scores_for_all_vocab: torch.FloatTensor, pad_token_id: Optional[Union[int, torch.Tensor]] = None, eos_token_id: Optional[Union[int, List[int], torch.Tensor]] = None, beam_indices: Optional[torch.LongTensor] = None, decoder_prompt_len: Optional[int] = 0, ) -> Tuple[torch.Tensor]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`): Current scores of the top `2 * num_beams` non-finished beam hypotheses. next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses. next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): Beam indices indicating to which beam hypothesis the `next_tokens` correspond. scores_for_all_vocab (`torch.FloatTensor` of shape `(batch_size * num_beams, sequence_length)`): The scores of all tokens in the vocabulary for each of the beam hypotheses. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. beam_indices (`torch.LongTensor`, *optional*): Beam indices indicating to which beam hypothesis each token correspond. decoder_prompt_len (`int`, *optional*): The length of prompt that is included in the input to decoder. Return: `UserDict`: A dictionary composed of the fields as defined above: - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all non-finished beams. - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added to the non-finished beam_hypotheses. - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices indicating to which beam the next tokens shall be added. """ # add up to the length which the next_scores is calculated on (including decoder prompt) cur_len = input_ids.shape[-1] + 1 batch_size = len(self._beam_hyps) if not (batch_size == (input_ids.shape[0] // self.group_size)): if self.num_beam_groups > 1: raise ValueError( f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam " f"size of {self.group_size} is expected by the beam scorer." 
) else: raise ValueError( f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of " f"{self.group_size} is expected by the beam scorer." ) device = input_ids.device next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device) next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device) next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device) if eos_token_id is not None and not isinstance(eos_token_id, torch.Tensor): if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] eos_token_id = torch.tensor(eos_token_id) for batch_idx, beam_hyp in enumerate(self._beam_hyps): if self._done[batch_idx]: if self.num_beams < len(beam_hyp): raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated") if eos_token_id is None or pad_token_id is None: raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined") # pad the batch next_beam_scores[batch_idx, :] = 0 next_beam_tokens[batch_idx, :] = pad_token_id next_beam_indices[batch_idx, :] = 0 continue # next tokens for this sentence. beam_idx = 0 for beam_token_rank, (next_token, next_score, next_index) in enumerate( zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx]) ): batch_beam_idx = batch_idx * self.group_size + next_index # add to generated hypotheses if end of sentence if (eos_token_id is not None) and (next_token.item() in eos_token_id): # if beam_token does not belong to top num_beams tokens, it should not be added is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size if is_beam_token_worse_than_top_num_beams: continue completes_constraint = self.check_completes_constraints(input_ids[batch_beam_idx].cpu().tolist()) if completes_constraint: if beam_indices is not None: beam_index = beam_indices[batch_beam_idx] beam_index = beam_index + (batch_beam_idx,) else: beam_index = None beam_hyp.add( input_ids[batch_beam_idx].clone(), next_score.item(), beam_indices=beam_index, generated_len=cur_len - decoder_prompt_len, ) else: # add next predicted token since it is not eos_token next_beam_scores[batch_idx, beam_idx] = next_score next_beam_tokens[batch_idx, beam_idx] = next_token next_beam_indices[batch_idx, beam_idx] = batch_beam_idx beam_idx += 1 # once the beam for next step is full, don't add more tokens to it. if beam_idx == self.group_size: break new_scores, new_tokens, new_indices = self.step_sentence_constraint( batch_idx, input_ids, scores_for_all_vocab, next_beam_scores[batch_idx], next_beam_tokens[batch_idx], next_beam_indices[batch_idx], ) next_beam_scores[batch_idx] = new_scores next_beam_tokens[batch_idx] = new_tokens next_beam_indices[batch_idx] = new_indices if beam_idx < self.group_size: raise ValueError( f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:" f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected." 
) # Check if we are done so that we can save a pad step if all(done) self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done( next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len ) return UserDict( { "next_beam_scores": next_beam_scores.view(-1), "next_beam_tokens": next_beam_tokens.view(-1), "next_beam_indices": next_beam_indices.view(-1), } ) def step_sentence_constraint( self, batch_idx: int, input_ids: torch.LongTensor, vocab_scores: torch.FloatTensor, sent_beam_scores: torch.FloatTensor, sent_beam_tokens: torch.LongTensor, sent_beam_indices: torch.LongTensor, push_progress: bool = False, ): # sent_beam_tokens are the next {num_beams} number of tokens that are under consideration for this beam # (candidate next tokens) # 1. Adding "advance_tokens" # using ConstraintStateList.advance(), we propose new tokens to be added into this "candidate list" that will # advance us in fulfilling the constraints. # 2. Selecting best candidates such that we end up with highest probable candidates # that fulfill our constraints. orig_len = sent_beam_indices.size(0) device = sent_beam_indices.device # initialize states topk_contraint_states = self.make_constraint_states(orig_len) advance_constraint_states = self.make_constraint_states(orig_len) sidx, eidx = batch_idx * orig_len, (batch_idx + 1) * orig_len this_batch_input_ids = input_ids[sidx:eidx] this_batch_token_scores = vocab_scores[sidx:eidx] full_hypotheses = torch.cat((input_ids[sent_beam_indices], sent_beam_tokens.unsqueeze(-1)), dim=-1) # need to make new hypothesis that advance the constraints track_new = { "new_seqs": full_hypotheses.tolist(), "new_states": [], "new_indices": [], "new_tokens": [], "new_scores": [], } for seq_idx, pre_seq in enumerate(this_batch_input_ids): # pre_seq = ith sequence generated before this step. # input_ids -> (topk) generic beam search best model next tokens # -> (advance) constraints forcing the next token # either way, we need to sort them into "banks" later, so store a "ConstraintListState" for all types of # hypotheses. topk_state = topk_contraint_states[seq_idx] topk_state.reset(full_hypotheses[seq_idx].cpu().tolist()) advance_state = advance_constraint_states[seq_idx] advance_state.reset(pre_seq.cpu().tolist()) if not advance_state.completed: advance_tokens = torch.LongTensor(advance_state.advance()).to(device) for advance_token in advance_tokens: # since adding each `advance_token` leads to a different hypothesis, create new state instance. new_state = advance_state.copy(stateful=True) new_state.add(advance_token.cpu().tolist()) advance_seq = torch.cat((pre_seq, advance_token.unsqueeze(0)), -1).cpu().tolist() if advance_seq not in track_new["new_seqs"]: # prevent duplicates, which are basically bound to happen in this process. track_new["new_seqs"].append(advance_seq) track_new["new_indices"].append(sidx + seq_idx) # idx -> global idx across all the batches track_new["new_tokens"].append(advance_token) track_new["new_scores"].append(this_batch_token_scores[seq_idx].take(advance_token)) track_new["new_states"].append(new_state) elif push_progress: # Basically, `sent_beam_indices` often chooses very little among `input_ids` the generated sequences that # actually fulfill our constraints. For example, let constraints == ["loves pies"] and # pre_seq_1 = "The child loves pies and" pre_seq_2 = "The child plays in the playground and" # Without this step, if `sent_beam_indices` is something like [1,1], then # 1. 
`pre_seq_1` won't be added to the list of (topk) hypothesis since it's not in the indices and # 2. it won't be added to the list of (advance) hypothesis since it's completed already. (this is # the else part of `if constraints_completed[seq_idx]`) # 3. it ends up simply getting removed from consideration. # #3 might be fine and actually desired, since it's likely that it's a low-probability output anyways, # especially if it's not in the list of `sent_beam_indices`. But this often leads to lengthened beam # search times, since completed sequences keep getting removed after all this effort for constrained # generation. # Here, we basically take `pre_seq_1` and to "push" it into the considered list of hypotheses, by simply # appending the next likely token in the vocabulary and adding it to the list of hypotheses. new_score, new_token = torch.max(this_batch_token_scores[seq_idx], 0) # some next probable token advance_seq = torch.cat((pre_seq, new_token.unsqueeze(0)), -1) advance_state = advance_constraint_states[seq_idx] advance_seq = advance_seq.cpu().tolist() advance_state.reset(advance_seq) if advance_seq not in track_new["new_seqs"]: # but still don't want to have duplicates track_new["new_seqs"].append(advance_seq) track_new["new_indices"].append(seq_idx) track_new["new_tokens"].append(new_token) track_new["new_scores"].append(new_score) track_new["new_states"].append(advance_state) if len(track_new["new_indices"]) > 0: new_indices = torch.tensor(track_new["new_indices"]).to(device) new_tokens = torch.stack(track_new["new_tokens"]).to(device) new_scores = torch.stack(track_new["new_scores"]).to(device) all_states = topk_contraint_states + track_new["new_states"] all_tokens = torch.cat((sent_beam_tokens, new_tokens), -1) all_scores = torch.cat((sent_beam_scores, new_scores), -1) all_banks = torch.tensor([one.get_bank() for one in all_states]).to(device) zipped = all_banks * 100 + all_scores indices = zipped.sort(descending=True).indices sorted_banks = all_banks[indices] # Then we end up with {sorted among bank C}, {sorted among bank C-1}, ..., {sorted among bank 0} counter = -1 cur_bank = sorted_banks[0] increments = [] for bank in sorted_banks: if bank == cur_bank: counter += 1 else: counter = 0 cur_bank = bank increments.append(counter) rearrangers = torch.tensor(np.argsort(increments, kind="mergesort")) indices = indices[rearrangers][:orig_len] sent_beam_scores = all_scores[indices] sent_beam_tokens = all_tokens[indices] sent_beam_indices = torch.cat((sent_beam_indices, new_indices))[indices] return sent_beam_scores, sent_beam_tokens, sent_beam_indices def finalize( self, input_ids: torch.LongTensor, final_beam_scores: torch.FloatTensor, final_beam_tokens: torch.LongTensor, final_beam_indices: torch.LongTensor, max_length: int, pad_token_id: Optional[Union[int, torch.Tensor]] = None, eos_token_id: Optional[Union[int, List[int], torch.Tensor]] = None, beam_indices: Optional[torch.LongTensor] = None, decoder_prompt_len: Optional[int] = 0, ) -> Tuple[torch.LongTensor]: batch_size = len(self._beam_hyps) if eos_token_id is not None and not isinstance(eos_token_id, torch.Tensor): if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] eos_token_id = torch.tensor(eos_token_id) # finalize all open beam hypotheses and add to generated hypotheses for batch_idx, beam_hyp in enumerate(self._beam_hyps): if self._done[batch_idx]: continue # all open beam hypotheses are added to the beam hypothesis # beam hypothesis class automatically keeps the best beams ids_collect = [] for beam_id in 
range(self.num_beams): batch_beam_idx = batch_idx * self.num_beams + beam_id final_score = final_beam_scores[batch_beam_idx].item() final_tokens = input_ids[batch_beam_idx] completes_constraint = self.check_completes_constraints(final_tokens.cpu().tolist()) if completes_constraint: beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None generated_len = final_tokens.shape[-1] - decoder_prompt_len beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len) ids_collect.append(beam_id) # due to overly complex constraints or other factors, sometimes we can't guarantee a successful # generation. In these cases we simply return the highest scoring outputs. if len(ids_collect) < self.num_beam_hyps_to_keep: for beam_id in range(self.num_beams): if beam_id not in ids_collect: batch_beam_idx = batch_idx * self.num_beams + beam_id final_score = final_beam_scores[batch_beam_idx].item() final_tokens = input_ids[batch_beam_idx] generated_len = final_tokens.shape[-1] - decoder_prompt_len beam_hyp.add(final_tokens, final_score, generated_len=generated_len) if len(ids_collect) >= self.num_beam_hyps_to_keep: break # select the best hypotheses sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep) best = [] best_indices = [] best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32) # retrieve best hypotheses for i, beam_hyp in enumerate(self._beam_hyps): sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0]) for j in range(self.num_beam_hyps_to_keep): best_hyp_tuple = sorted_hyps.pop() best_score = best_hyp_tuple[0] best_hyp = best_hyp_tuple[1] best_index = best_hyp_tuple[2] sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp) # append to lists best.append(best_hyp) # append indices to list best_indices.append(best_index) best_scores[i * self.num_beam_hyps_to_keep + j] = best_score # prepare for adding eos sent_lengths_max = sent_lengths.max().item() + 1 sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) if len(best_indices) > 0 and best_indices[0] is not None: indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) else: indices = None # shorter batches are padded if needed if sent_lengths.min().item() != sent_lengths.max().item(): if pad_token_id is None: raise ValueError("`pad_token_id` has to be defined") decoded.fill_(pad_token_id) if indices is not None: indices.fill_(-1) # fill with hypotheses and eos_token_id if the latter fits in for i, (hypo, best_idx) in enumerate(zip(best, best_indices)): decoded[i, : sent_lengths[i]] = hypo if indices is not None: indices[i, : len(best_idx)] = torch.tensor(best_idx) if sent_lengths[i] < sent_max_len: # inserting only the first eos_token_id decoded[i, sent_lengths[i]] = eos_token_id[0] return UserDict( { "sequences": decoded, "sequence_scores": best_scores, "beam_indices": indices, } ) class BeamHypotheses: def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool, max_length: Optional[int] = None): """ Initialize n-best list of hypotheses.
""" self.length_penalty = length_penalty self.early_stopping = early_stopping self.max_length = max_length self.num_beams = num_beams self.beams = [] self.worst_score = 1e9 if not isinstance(self.early_stopping, bool) and self.max_length is None: raise ValueError( "When `do_early_stopping` is set to a string, `max_length` must be defined. Ensure it is passed to the" " BeamScorer class instance at initialization time." ) def __len__(self): """ Number of hypotheses in the list. """ return len(self.beams) def add( self, hyp: torch.LongTensor, sum_logprobs: float, beam_indices: Optional[torch.LongTensor] = None, generated_len: Optional[int] = None, ): """ Add a new hypothesis to the list. """ if generated_len is not None: score = sum_logprobs / (generated_len**self.length_penalty) # This 'else' case exists for retrocompatibility else: score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty) if len(self) < self.num_beams or score > self.worst_score: self.beams.append((score, hyp, beam_indices)) if len(self) > self.num_beams: sorted_next_scores = sorted([(s, idx) for idx, (s, _, _) in enumerate(self.beams)]) del self.beams[sorted_next_scores[0][1]] self.worst_score = sorted_next_scores[1][0] else: self.worst_score = min(score, self.worst_score) def is_done(self, best_sum_logprobs: float, cur_len: int, decoder_prompt_len: Optional[int] = 0) -> bool: """ If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst one in the heap, then we are done with this sentence. """ if len(self) < self.num_beams: return False # `True`: stop as soon as at least `num_beams` hypotheses are finished if self.early_stopping is True: return True # `False`: heuristic -- compute best possible score from `cur_len`, even though it is not entirely accurate # when `length_penalty` is positive. See the discussion below for more details. # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 elif self.early_stopping is False: highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty ret = self.worst_score >= highest_attainable_score return ret # `"never"`: compute the best possible score, depending on the signal of `length_penalty` else: # `length_penalty` > 0.0 -> max denominator is obtaned from `max_length`, not from `cur_len` -> min # abs(`highest_attainable_score`) is obtained -> `highest_attainable_score` is negative, hence we obtain # its max this way if self.length_penalty > 0.0: if self.max_length <= decoder_prompt_len: raise ValueError("max_length is not larger than decoder prompt length") highest_attainable_score = ( best_sum_logprobs / (self.max_length - decoder_prompt_len) ** self.length_penalty ) # the opposite logic applies here (max `highest_attainable_score` from `cur_len`) else: highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty ret = self.worst_score >= highest_attainable_score return ret
transformers/src/transformers/generation/beam_search.py/0
{ "file_path": "transformers/src/transformers/generation/beam_search.py", "repo_id": "transformers", "token_count": 22993 }
87
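One detail from `BeamHypotheses.add` above that is easy to miss: hypothesis scores are the summed log-probabilities divided by `generated_len ** length_penalty`, so with negative log-probabilities a larger penalty pushes scores toward zero and favours longer sequences. A standalone sketch of that normalisation (not part of the library file):

# Mirrors the scoring rule in BeamHypotheses.add: score = sum_logprobs / generated_len ** length_penalty
def beam_hypothesis_score(sum_logprobs: float, generated_len: int, length_penalty: float) -> float:
    return sum_logprobs / (generated_len ** length_penalty)

print(beam_hypothesis_score(-6.0, generated_len=10, length_penalty=1.0))  # -0.6
print(beam_hypothesis_score(-6.0, generated_len=10, length_penalty=2.0))  # -0.06 (longer sequences penalised less)
print(beam_hypothesis_score(-6.0, generated_len=10, length_penalty=0.0))  # -6.0 (no length normalisation)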
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from dataclasses import dataclass from typing import Any, Iterable, List, Optional, Tuple from .image_processing_utils import BaseImageProcessor from .utils.import_utils import is_torch_available, is_torchvision_available if is_torchvision_available(): from torchvision.transforms import Compose if is_torch_available(): import torch @dataclass(frozen=True) class SizeDict: """ Hashable dictionary to store image size information. """ height: int = None width: int = None longest_edge: int = None shortest_edge: int = None max_height: int = None max_width: int = None def __getitem__(self, key): if hasattr(self, key): return getattr(self, key) raise KeyError(f"Key {key} not found in SizeDict.") class BaseImageProcessorFast(BaseImageProcessor): _transform_params = None def _build_transforms(self, **kwargs) -> "Compose": """ Given the input settings e.g. do_resize, build the image transforms. """ raise NotImplementedError def _validate_params(self, **kwargs) -> None: for k, v in kwargs.items(): if k not in self._transform_params: raise ValueError(f"Invalid transform parameter {k}={v}.") @functools.lru_cache(maxsize=1) def get_transforms(self, **kwargs) -> "Compose": self._validate_params(**kwargs) return self._build_transforms(**kwargs) def to_dict(self): encoder_dict = super().to_dict() encoder_dict.pop("_transform_params", None) return encoder_dict def get_image_size_for_max_height_width( image_size: Tuple[int, int], max_height: int, max_width: int, ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the maximum allowed height and width. Keeps the aspect ratio. Importantly, even if image_height < max_height and image_width < max_width, the image will be resized so that at least one of its edges equals max_height or max_width. For example: - input_size: (100, 200), max_height: 50, max_width: 50 -> output_size: (25, 50) - input_size: (100, 200), max_height: 200, max_width: 500 -> output_size: (200, 400) Args: image_size (`Tuple[int, int]`): The size of the image to resize, as `(height, width)`. max_height (`int`): The maximum allowed height. max_width (`int`): The maximum allowed width. """ height, width = image_size height_scale = max_height / height width_scale = max_width / width min_scale = min(height_scale, width_scale) new_height = int(height * min_scale) new_width = int(width * min_scale) return new_height, new_width def safe_squeeze(tensor: "torch.Tensor", axis: Optional[int] = None) -> "torch.Tensor": """ Squeezes a tensor, but only if the axis specified has dim 1. """ if axis is None: return tensor.squeeze() try: return tensor.squeeze(axis=axis) except ValueError: return tensor def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values.
""" return [max(values_i) for values_i in zip(*values)] def get_max_height_width(images: List["torch.Tensor"]) -> Tuple[int]: """ Get the maximum height and width across all images in a batch. """ _, max_height, max_width = max_across_indices([img.shape for img in images]) return (max_height, max_width)
transformers/src/transformers/image_processing_utils_fast.py/0
{ "file_path": "transformers/src/transformers/image_processing_utils_fast.py", "repo_id": "transformers", "token_count": 1541 }
88
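To make the aspect-ratio-preserving rule of `get_image_size_for_max_height_width` above concrete, it can be exercised on the two sizes from its own docstring; the import path is taken from the record's file path.

from transformers.image_processing_utils_fast import get_image_size_for_max_height_width

# The scale is min(max_height / height, max_width / width), applied to both edges.
print(get_image_size_for_max_height_width((100, 200), max_height=50, max_width=50))    # (25, 50)
print(get_image_size_for_max_height_width((100, 200), max_height=200, max_width=500))  # (200, 400)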
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. "HIGGS through FLUTE (Flexible Lookup Table Engine for LUT-quantized LLMs) integration file" from math import sqrt from ..utils import ( is_flute_available, is_hadamard_available, is_torch_available, ) if is_torch_available(): import torch from torch import nn if is_flute_available(): import flute.utils if is_hadamard_available(): from fast_hadamard_transform import hadamard_transform if is_flute_available(): import flute.utils from flute.integrations.higgs import prepare_data_transposed def pad_to_block(tensor, dims, had_block_size, value=0): pad_dims = [0 for _ in range(2 * len(tensor.shape))] for dim in dims: size = tensor.shape[dim] next_multiple_of_1024 = ((size - 1) // had_block_size + 1) * had_block_size delta = next_multiple_of_1024 - size pad_dims[-2 * dim - 1] = delta return nn.functional.pad(tensor, pad_dims, "constant", value) def get_higgs_grid(p: int, n: int): if (p, n) == (2, 256): return torch.tensor( [ [-2.501467704772949, 0.17954708635807037], [-0.6761789321899414, 1.2728623151779175], [-1.8025816679000854, 0.7613157629966736], [-0.538287878036499, -2.6028504371643066], [0.8415029644966125, -0.8600977659225464], [0.7023013234138489, 3.3138747215270996], [0.5699077844619751, 2.5782253742218018], [3.292393207550049, -0.6016128063201904], [0.5561617016792297, -1.7723814249038696], [-2.1012380123138428, 0.020958125591278076], [0.46085724234580994, 0.8428705334663391], [1.4548040628433228, -0.6156039237976074], [3.210029363632202, 0.3546904921531677], [0.8893890976905823, -0.5967988967895508], [0.8618854284286499, -3.2061192989349365], [1.1360996961593628, -0.23852407932281494], [1.6646337509155273, -0.9265465140342712], [1.4767773151397705, 1.2476022243499756], [-1.0511897802352905, 1.94503915309906], [-1.56318998336792, -0.3264186680316925], [-0.1829211413860321, 0.2922491431236267], [-0.8950616717338562, -1.3887052536010742], [-0.08206957578659058, -1.329533576965332], [-0.487422913312912, 1.4817842245101929], [-1.6769757270812988, -2.8269758224487305], [-1.5057679414749146, 1.8905963897705078], [1.8335362672805786, 1.0515104532241821], [0.3273945450782776, 1.0491033792495728], [-3.295924186706543, -0.7021600008010864], [-1.8428784608840942, -1.2315762042999268], [-0.8575026392936707, -1.7005949020385742], [-1.120667815208435, 0.6467998027801514], [-0.1588846743106842, -1.804071068763733], [-0.8539647459983826, 0.5645008683204651], [-1.4192019701004028, -0.6175029873847961], [1.0799058675765991, 1.7871345281600952], [1.171311855316162, 0.7511613965034485], [2.162078380584717, 0.8044339418411255], [1.3969420194625854, -1.243762493133545], [-0.23818807303905487, 0.053944624960422516], [2.304199457168579, -1.2667627334594727], [1.4225027561187744, 0.568610668182373], [0.376836895942688, -0.7134661674499512], [2.0404467582702637, 0.4087389409542084], [0.7639489769935608, -1.1367933750152588], [0.3622530400753021, -1.4827953577041626], [0.4100743532180786, 0.36108437180519104], 
[-1.5867475271224976, -1.618212342262268], [-2.2769672870635986, -1.2132309675216675], [0.9184022545814514, -0.34428009390830994], [-0.3902314603328705, 0.21785245835781097], [3.120687484741211, 1.3077973127365112], [1.587440848350525, -1.6506884098052979], [-1.718808889389038, -0.038405973464250565], [-0.6888407468795776, -0.8402308821678162], [-0.7981445789337158, -1.1117373704910278], [-2.4124443531036377, 1.3419722318649292], [-0.6611530184745789, 0.9939885139465332], [-0.33103418350219727, -0.16702833771705627], [-2.4091389179229736, -2.326857566833496], [1.6610108613967896, -2.159703254699707], [0.014884627424180508, 0.3887578248977661], [0.029668325558304787, 1.8786455392837524], [1.180362582206726, 2.699317216873169], [1.821286678314209, -0.5960053205490112], [-0.44835323095321655, 3.327436685562134], [-0.3714401423931122, -2.1466753482818604], [-1.1103475093841553, -2.4536871910095215], [-0.39110705256462097, 0.6670510172843933], [0.474752813577652, -1.1959707736968994], [-0.013110585510730743, -2.52519154548645], [-2.0836575031280518, -1.703289270401001], [-1.1077687740325928, -0.1252644956111908], [-0.4138077199459076, 1.1837692260742188], [-1.977599024772644, 1.688241720199585], [-1.659559965133667, -2.1387736797332764], [0.03242531046271324, 0.6526556015014648], [0.9127950072288513, 0.6099498867988586], [-0.38478314876556396, 0.433487206697464], [0.27454206347465515, -0.27719801664352417], [0.10388526320457458, 2.2812814712524414], [-0.014394169673323631, -3.177137613296509], [-1.2871228456497192, -0.8961855173110962], [0.5720916986465454, -0.921597957611084], [1.1159656047821045, -0.7609877586364746], [2.4383342266082764, -2.2983546257019043], [-0.294057160615921, -0.9770799875259399], [-0.9342701435089111, 1.107579231262207], [-1.549338698387146, 3.090520143508911], [2.6076579093933105, 2.051239013671875], [-0.9259037375450134, 1.407211184501648], [-0.1747353971004486, 0.540488600730896], [-0.8963701725006104, 0.8271111249923706], [0.6480194926261902, 1.0128909349441528], [0.980783998966217, -0.06156221032142639], [-0.16883476078510284, 1.0601658821105957], [0.5839992761611938, 0.004697148688137531], [-0.34228450059890747, -1.2423977851867676], [2.500824451446533, 0.3665279746055603], [-0.17641609907150269, 1.3529551029205322], [0.05378641560673714, 2.817232847213745], [-1.2391047477722168, 2.354328155517578], [0.630434513092041, -0.668536365032196], [1.7576488256454468, 0.6738647818565369], [0.4435231387615204, 0.6000469326972961], [-0.08794835954904556, -0.11511358618736267], [1.6540337800979614, 0.33995017409324646], [-0.04202975332736969, -0.5375117063522339], [-0.4247745871543884, -0.7897617220878601], [0.06695003807544708, 1.2000739574432373], [-3.2508881092071533, 0.28734830021858215], [-1.613816261291504, 0.4944162368774414], [1.3598989248275757, 0.26117825508117676], [2.308382511138916, 1.3462618589401245], [-1.2137469053268433, -1.9254342317581177], [-0.4889402985572815, 1.8136259317398071], [-0.1870335340499878, -0.3480615019798279], [1.0766386985778809, -1.0627082586288452], [0.4651014506816864, 2.131748914718628], [-0.1306295394897461, -0.7811847925186157], [0.06433182954788208, -1.5397958755493164], [-0.2894323468208313, -0.5789554715156555], [-0.6081662178039551, 0.4845278263092041], [2.697964668273926, -0.18515698611736298], [0.1277363896369934, -0.7221432328224182], [0.8700758218765259, 0.35042452812194824], [0.22088994085788727, 0.495242178440094], [-2.5843818187713623, -0.8000828623771667], [0.6732649803161621, -1.4362232685089111], [-1.5286413431167603, 
1.0417330265045166], [-1.1222513914108276, -0.6269875764846802], [-0.9752035140991211, -0.8750635385513306], [-2.6369473934173584, 0.6918523907661438], [0.14478731155395508, -0.041986867785453796], [-1.5629483461380005, 1.4369450807571411], [0.38952457904815674, -2.16428804397583], [-0.16885095834732056, 0.7976621985435486], [-3.12416934967041, 1.256506085395813], [0.6843105554580688, -0.4203019142150879], [1.9345275163650513, 1.934950351715088], [0.012184220366179943, -2.1080918312072754], [-0.6350273489952087, 0.7358828186988831], [-0.837304949760437, -0.6214472651481628], [0.08211923390626907, -0.9472538232803345], [2.9332995414733887, -1.4956780672073364], [1.3806978464126587, -0.2916182279586792], [0.06773144006729126, 0.9285762310028076], [-1.1943119764328003, 1.5963770151138306], [1.6395620107650757, -0.32285431027412415], [-1.390851378440857, -0.08273141086101532], [1.816330909729004, -1.2812227010726929], [0.7921574711799622, -2.1135804653167725], [0.5817914605140686, 1.2644577026367188], [1.929347038269043, -0.2386285960674286], [0.8877345323562622, 1.190008521080017], [1.4732073545455933, 0.8935023546218872], [-2.8518524169921875, -1.5478795766830444], [0.2439267635345459, 0.7576767802238464], [0.5246709585189819, -2.606659412384033], [1.150876760482788, 1.4073830842971802], [-0.2643202245235443, 2.0634236335754395], [1.555483341217041, -0.0023102816194295883], [2.0830578804016113, -1.7225427627563477], [-0.5424830317497253, -1.070199728012085], [0.9168899655342102, 0.8955540060997009], [-0.8120972514152527, 2.696739912033081], [-0.29908373951911926, -1.5310651063919067], [1.2320337295532227, -1.556247353553772], [1.8612544536590576, 0.08704725652933121], [0.22133447229862213, -1.8091708421707153], [-0.4403655230998993, -0.38571012020111084], [-1.88539457321167, 1.192205786705017], [2.239687919616699, 0.004709010478109121], [1.139495611190796, 0.45733731985092163], [-1.507995367050171, 0.19716016948223114], [0.46986445784568787, 1.5422041416168213], [-1.2573751211166382, -0.35984551906585693], [-1.7415345907211304, -0.6020717024803162], [1.0751984119415283, 0.19006384909152985], [2.24186635017395, -0.46343153715133667], [0.3610347509384155, -0.07658443599939346], [-1.3111497163772583, 0.432013601064682], [0.6164408326148987, 0.24538464844226837], [-1.9266542196273804, -0.3256155550479889], [-0.5870336890220642, -0.1879584938287735], [-1.0476511716842651, 0.3677721917629242], [-1.229940414428711, 1.2433830499649048], [0.18550436198711395, 0.22753673791885376], [-0.017921989783644676, 0.12625974416732788], [1.1659504175186157, -0.5020995736122131], [-0.5983408093452454, -1.40438973903656], [0.7519024014472961, -0.16282692551612854], [0.9920787811279297, -1.344896912574768], [-0.8103678226470947, 0.3064485788345337], [0.6956969499588013, 1.8208192586898804], [-2.7830491065979004, -0.2299390584230423], [-0.34681546688079834, 2.4890666007995605], [-1.4452646970748901, -1.2216600179672241], [-2.1872897148132324, 0.8926076292991638], [1.706072211265564, -2.8440372943878174], [1.1119003295898438, -2.4923460483551025], [-2.582794666290283, 2.0973289012908936], [0.04987720400094986, -0.2964983284473419], [-2.063807487487793, -0.7847916483879089], [-0.4068813621997833, 0.9135897755622864], [-0.9814359545707703, -0.3874954879283905], [-1.4227229356765747, 0.7337291240692139], [0.3065044581890106, 1.3125417232513428], [1.2160996198654175, -1.9643305540084839], [-1.2163853645324707, 0.14608727395534515], [-2.3030710220336914, -0.37558120489120483], [0.9232977628707886, 2.1843791007995605], 
[-0.1989777386188507, 1.651851773262024], [-0.714374840259552, -0.39365994930267334], [-0.7805715799331665, -2.099881887435913], [0.9015759229660034, -1.7053706645965576], [0.1033422127366066, 1.5256654024124146], [-1.8773194551467896, 2.324174165725708], [1.9227174520492554, 2.7441604137420654], [-0.5994020104408264, 0.23984014987945557], [1.3496100902557373, -0.9126054644584656], [-0.8765304088592529, -3.1877026557922363], [-1.2040035724639893, -1.5169521570205688], [1.4261796474456787, 2.150200128555298], [1.463774561882019, 1.6656692028045654], [0.20364105701446533, -0.4988172650337219], [0.5195154547691345, -0.24067887663841248], [-1.1116786003112793, -1.1599653959274292], [-0.8490808606147766, -0.1681060940027237], [0.3189965784549713, -0.9641751646995544], [-0.5664751529693604, -0.5951744318008423], [-1.6347930431365967, -0.9137664437294006], [0.44048091769218445, -0.47259435057640076], [-2.147747039794922, 0.47442489862442017], [1.834734320640564, 1.4462147951126099], [1.1777573823928833, 1.0659226179122925], [-0.9568989872932434, 0.09495053440332413], [-1.838529348373413, 0.2950586676597595], [-0.4800611734390259, 0.014894310384988785], [-0.5235516428947449, -1.7687653303146362], [2.0735011100769043, -0.8825281262397766], [2.637502431869507, 0.8455678224563599], [2.606602907180786, -0.7848446369171143], [-1.1886937618255615, 0.9330510497093201], [0.38082656264305115, 0.13328030705451965], [0.6847941875457764, 0.7384101152420044], [1.2638574838638306, -0.007309418171644211], [0.18292222917079926, -1.22371244430542], [0.8143821954727173, 1.4976691007614136], [0.6571850776672363, 0.48368802666664124], [-0.6991601586341858, 2.150190830230713], [0.8101756572723389, 0.10206498205661774], [-0.08768226951360703, -1.084917664527893], [-0.7208092212677002, 0.03657956421375275], [0.3211449086666107, 1.803687334060669], [-0.7835946083068848, 1.6869111061096191], ] ) if (p, n) == (2, 64): return torch.tensor( [ [-2.7216711044311523, 0.14431366324424744], [-0.766914427280426, 1.7193410396575928], [-2.2575762271881104, 1.2476624250411987], [1.233758807182312, -2.3560616970062256], [0.8701965808868408, -0.2649352252483368], [1.4506438970565796, 2.1776366233825684], [-0.06305818259716034, 1.9049758911132812], [2.536226511001587, 0.563927412033081], [0.4599496126174927, -1.8745561838150024], [-1.900517225265503, -0.30703988671302795], [0.09386251866817474, 0.8755807280540466], [1.946500539779663, -0.6743080615997314], [2.1338934898376465, 1.4581491947174072], [0.9429940581321716, -0.8038390278816223], [2.0697755813598633, -1.614896535873413], [0.772676408290863, 0.22017823159694672], [1.0689979791641235, -1.525044322013855], [0.6813604831695557, 1.1345642805099487], [0.4706456661224365, 2.606626272201538], [-1.294018030166626, -0.4372096061706543], [-0.09134224057197571, 0.4610418677330017], [-0.7907772064208984, -0.48412787914276123], [0.060459110885858536, -0.9172890186309814], [-0.5855047702789307, 2.56172513961792], [0.11484206467866898, -2.659848213195801], [-1.5893300771713257, 2.188580274581909], [1.6750942468643188, 0.7089915871620178], [-0.445697546005249, 0.7452405095100403], [-1.8539940118789673, -1.8377939462661743], [-1.5791912078857422, -1.017285943031311], [-1.030419945716858, -1.5746369361877441], [-1.9511750936508179, 0.43696075677871704], [-0.3446580767631531, -1.8953213691711426], [-1.4219647645950317, 0.7676230669021606], [-0.9191089272499084, 0.5021472573280334], [0.20464491844177246, 1.3684605360031128], [0.5402919054031372, 0.6699410676956177], [1.8903915882110596, 
0.03638288006186485], [0.4723062515258789, -0.6216739416122437], [-0.41345009207725525, -0.22752176225185394], [2.7119064331054688, -0.5111885070800781], [1.065286636352539, 0.6950305700302124], [0.40629103779792786, -0.14339995384216309], [1.2815024852752686, 0.17108257114887238], [0.01785222627222538, -0.43778058886528015], [0.054590027779340744, -1.4225547313690186], [0.3076786696910858, 0.30697619915008545], [-0.9498570561408997, -0.9576997756958008], [-2.4640724658966064, -0.9660449028015137], [1.3714425563812256, -0.39760473370552063], [-0.4857747256755829, 0.2386789172887802], [1.2797833681106567, 1.3097363710403442], [0.5508887767791748, -1.1777795553207397], [-1.384316325187683, 0.1465839296579361], [-0.46556955575942993, -1.2442727088928223], [-0.3915477693080902, -0.7319604158401489], [-1.4005504846572876, 1.3890998363494873], [-0.8647305965423584, 1.0617644786834717], [-0.8901953101158142, -0.01650036871433258], [-0.9893633723258972, -2.4662880897521973], [1.445534110069275, -1.049334168434143], [-0.041650623083114624, 0.012734669260680676], [-0.3302375078201294, 1.26217782497406], [0.6934980154037476, 1.7714335918426514], ] ) elif (p, n) == (2, 16): return torch.tensor( [ [-0.8996632695198059, -1.6360418796539307], [-0.961183488368988, 1.5999565124511719], [-1.882026195526123, 0.678778350353241], [0.36300793290138245, -1.9667866230010986], [-0.6814072728157043, -0.576818585395813], [0.7270012497901917, 0.6186859607696533], [0.3359416127204895, 1.8371193408966064], [1.859930396080017, 0.036668598651885986], [0.17208248376846313, -0.9401724338531494], [-1.7599700689315796, -0.6244229674339294], [-0.8993809223175049, 0.32267823815345764], [0.839488685131073, -0.3017036020755768], [1.5314953327178955, 1.2942044734954834], [-0.0011779458727687597, 0.00022069070837460458], [1.4274526834487915, -1.207889199256897], [-0.16123905777931213, 0.8787511587142944], ] ) elif (p, n) == (1, 16): return torch.tensor( [ [-2.7325894832611084], [-2.069017171859741], [-1.6180464029312134], [-1.2562311887741089], [-0.9423404335975647], [-0.6567591428756714], [-0.38804829120635986], [-0.12839503586292267], [0.12839503586292267], [0.38804829120635986], [0.6567591428756714], [0.9423404335975647], [1.2562311887741089], [1.6180464029312134], [2.069017171859741], [2.7325894832611084], ] ) elif (p, n) == (1, 8): return torch.tensor( [ [-2.1519455909729004], [-1.3439092636108398], [-0.7560052871704102], [-0.2450941801071167], [0.2450941801071167], [0.7560052871704102], [1.3439092636108398], [2.1519455909729004], ] ) elif (p, n) == (1, 4): return torch.tensor([[-1.5104175806045532], [-0.4527800381183624], [0.4527800381183624], [1.5104175806045532]]) else: raise NotImplementedError(f"Unsupported p={p}, n={n}") def quantize_with_higgs(weight, bits: int = 4, p: int = 2, group_size: int = 256, hadamard_size: int = 1024): assert len(weight.shape) == 2, "Only 2D weights are supported for now" grid = get_higgs_grid(p, 2 ** (p * bits)).to(weight.device) grid_norm_2 = torch.linalg.norm(grid, axis=-1) ** 2 device = weight.device dtype = weight.dtype weight = weight.clone().float() # Pad to Hadamard transform size weight = pad_to_block(weight, [1], hadamard_size) # Scale and Hadamard transform mult = weight.shape[1] // hadamard_size weight = weight.reshape(-1, mult, hadamard_size) scales = torch.linalg.norm(weight, axis=-1) weight = hadamard_transform(weight, 1) / scales[:, :, None] # Pad to edenn_d and project weight = pad_to_block(weight, [2], p).reshape(weight.shape[0], mult, -1, p) # Quantize codes = 
torch.empty(weight.shape[:-1], device=device, dtype=torch.uint8) for i in range(0, weight.shape[0], 64): codes[i : i + 64] = torch.argmax(2 * weight[i : i + 64] @ grid.T - grid_norm_2, dim=-1).to(torch.uint8) del weight codes = codes.reshape(codes.shape[0], -1) scales = scales / sqrt(hadamard_size) weight, scales, tables, tables2 = prepare_data_transposed( codes, torch.repeat_interleave(scales.to(dtype), hadamard_size // group_size, dim=1), grid.to(dtype), num_bits=bits, group_size=group_size, vector_size=p, dtype=dtype, device=device, ) return { "weight": weight, "scales": scales, "tables": tables, "tables2": tables2.view(dtype=torch.float16), } class HiggsLinear(torch.nn.Module): def __init__( self, in_features: int, out_features: int, num_bits: int, bias=True, dtype: torch.dtype = None, device: torch.device = None, group_size: int = 256, hadamard_size: int = 1024, ): super().__init__() self.in_features = in_features self.out_features = out_features self.num_bits = num_bits self.group_size = group_size self.hadamard_size = hadamard_size self.num_sms_packed = nn.Parameter(torch.tensor(-1, dtype=torch.int32, device=device), requires_grad=False) assert in_features % group_size == 0 assert num_bits in [2, 3, 4] self.weight = nn.Parameter( torch.empty((out_features * num_bits // 16, in_features), dtype=torch.int16, device=device), requires_grad=False, ) self.scales = nn.Parameter( torch.empty((out_features, in_features // group_size), dtype=dtype, device=device), requires_grad=False ) self.tables = nn.Parameter(torch.empty((2**num_bits,), dtype=dtype, device=device), requires_grad=False) self.tables2 = nn.Parameter( torch.empty((2**num_bits, 2**num_bits, 2), dtype=dtype, device=device), requires_grad=False ) if bias: self.bias = nn.Parameter(torch.empty(out_features, device=device, dtype=dtype), requires_grad=False) else: self.register_parameter("bias", None) self.workspace = None # must be set externally to be reused among layers def forward(self, x): x = pad_to_block(x, [-1], self.hadamard_size) if self.workspace is None: raise Exception("Workspace must be set before calling forward") return flute.qgemm_hadamard( x, self.weight, self.scales, self.tables, self.tables2.view(dtype=torch.float32), self.workspace, self.num_bits, self.group_size, self.hadamard_size, ) def replace_with_higgs_linear( model, quantization_config=None, current_key_name=None, has_been_replaced=False, ): """ Public method that recursively replaces the Linear layers of the given model with HIGGS quantized layers. `accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. Args: model (`torch.nn.Module`): The model to convert, can be any `torch.nn.Module` instance. quantization_config (`HiggsConfig`): The quantization config object that contains the quantization parameters. current_key_name (`list`, *optional*): A list that contains the current key name. This is used for recursion and should not be passed by the user. has_been_replaced (`bool`, *optional*): A boolean that indicates if the conversion has been successful or not. This is used for recursion and should not be passed by the user. 
""" from accelerate import init_empty_weights for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, nn.Linear): # Check if the current key is not in the `quantization_config.modules_to_not_convert` current_key_name_str = ".".join(current_key_name) if not any(current_key_name_str.endswith(key) for key in quantization_config.modules_to_not_convert): with init_empty_weights(): in_features = module.in_features out_features = module.out_features model._modules[name] = HiggsLinear( in_features, out_features, bias=module.bias is not None, num_bits=quantization_config.bits, hadamard_size=quantization_config.hadamard_size, group_size=quantization_config.group_size, ) has_been_replaced = True # Store the module class in case we need to transpose the weight later model._modules[name].source_cls = type(module) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(False) if len(list(module.children())) > 0: _, has_been_replaced = replace_with_higgs_linear( module, quantization_config=quantization_config, current_key_name=current_key_name, has_been_replaced=has_been_replaced, ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def dequantize_higgs(model, current_key_name=None): """ Dequantizes the HiggsLinear layers in the given model by replacing them with standard torch.nn.Linear layers. Args: model (torch.nn.Module): The model containing HiggsLinear layers to be dequantized. current_key_name (list, optional): A list to keep track of the current module names during recursion. Defaults to None. Returns: torch.nn.Module: The model with HiggsLinear layers replaced by torch.nn.Linear layers. """ with torch.no_grad(): for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, HiggsLinear): in_features = module.in_features out_features = module.out_features model._modules[name] = torch.nn.Linear( in_features, out_features, bias=module.bias is not None, device=module.scales.device, dtype=module.scales.dtype, ) model._modules[name].weight.data = module( torch.eye(in_features, device=module.scales.device, dtype=module.scales.dtype) ).T.contiguous() if len(list(module.children())) > 0: _ = dequantize_higgs( module, current_key_name=current_key_name, ) # Remove the last key for recursion current_key_name.pop(-1) return model
transformers/src/transformers/integrations/higgs.py/0
{ "file_path": "transformers/src/transformers/integrations/higgs.py", "repo_id": "transformers", "token_count": 18032 }
89
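The `integrations/higgs.py` file above defines the HIGGS/FLUTE building blocks: `quantize_with_higgs`, the `HiggsLinear` layer, and the `replace_with_higgs_linear` module-swapping pass. Below is a minimal sketch (not taken from the repository) of how that swapping pass could be exercised on a toy model; it assumes `torch` and `accelerate` are installed (FLUTE itself is only needed to actually run the quantized layers), and the `SimpleNamespace` merely stands in for the real HIGGS quantization config object.

```python
from types import SimpleNamespace

import torch.nn as nn

from transformers.integrations.higgs import HiggsLinear, replace_with_higgs_linear

# Stand-in for the real quantization config: only the attributes that
# replace_with_higgs_linear reads are provided here.
quant_config = SimpleNamespace(
    bits=4, group_size=256, hadamard_size=1024, modules_to_not_convert=["lm_head"]
)

# Toy model; in_features of every Linear must be divisible by group_size.
model = nn.Sequential(nn.Linear(256, 512), nn.ReLU(), nn.Linear(512, 256))

model, replaced = replace_with_higgs_linear(model, quantization_config=quant_config)
assert replaced and isinstance(model[0], HiggsLinear)

# The swapped-in layers hold empty (meta) tensors: real quantized weights produced by
# quantize_with_higgs still have to be loaded, and a FLUTE workspace assigned to
# `.workspace`, before forward() can run.
```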
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <vector>

#define min(a, b) ((a)<(b)?(a):(b))
#define max(a, b) ((a)>(b)?(a):(b))

std::vector<at::Tensor> index_max_kernel(
  at::Tensor index_vals,
  at::Tensor indices,
  int A_num_block,
  int B_num_block
);

at::Tensor mm_to_sparse_kernel(
  at::Tensor dense_A,
  at::Tensor dense_B,
  at::Tensor indices
);

at::Tensor sparse_dense_mm_kernel(
  at::Tensor sparse_A,
  at::Tensor indices,
  at::Tensor dense_B,
  int A_num_block
);

at::Tensor reduce_sum_kernel(
  at::Tensor sparse_A,
  at::Tensor indices,
  int A_num_block,
  int B_num_block
);

at::Tensor scatter_kernel(
  at::Tensor dense_A,
  at::Tensor indices,
  int B_num_block
);
transformers/src/transformers/kernels/mra/cuda_launch.h/0
{ "file_path": "transformers/src/transformers/kernels/mra/cuda_launch.h", "repo_id": "transformers", "token_count": 312 }
90
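The `cuda_launch.h` file above only declares the C++ launchers for the MRA sparse-attention CUDA kernels. As a rough illustration (the source file names and paths are assumptions about the checkout layout, not stated in the header), such declarations are typically compiled and exposed to Python through torch's JIT C++ extension loader:

```python
from pathlib import Path

from torch.utils.cpp_extension import load

# Illustrative paths: the .cu/.cpp sources sit next to cuda_launch.h in the
# kernels/mra folder of a transformers checkout; adjust to your local path.
src = Path("src/transformers/kernels/mra")
mra_kernels = load(
    name="cuda_kernel",
    sources=[str(src / f) for f in ["cuda_kernel.cu", "cuda_launch.cu", "torch_extension.cpp"]],
    verbose=True,
)
# After compilation, the launchers declared in the header are reachable as attributes
# of `mra_kernels`; the exact exposed names come from the pybind bindings in
# torch_extension.cpp, not from this header.
```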
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn import torch.nn.functional as F from ..utils import is_scipy_available, is_vision_available, requires_backends from .loss_for_object_detection import ( _set_aux_loss, box_iou, dice_loss, generalized_box_iou, nested_tensor_from_tensor_list, sigmoid_focal_loss, ) if is_scipy_available(): from scipy.optimize import linear_sum_assignment if is_vision_available(): from transformers.image_transforms import center_to_corners_format class RTDetrHungarianMatcher(nn.Module): """This class computes an assignment between the targets and the predictions of the network For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: config: RTDetrConfig """ def __init__(self, config): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = config.matcher_class_cost self.bbox_cost = config.matcher_bbox_cost self.giou_cost = config.matcher_giou_cost self.use_focal_loss = config.use_focal_loss self.alpha = config.matcher_alpha self.gamma = config.matcher_gamma if self.class_cost == self.bbox_cost == self.giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """Performs the matching Params: outputs: This is a dict that contains at least these entries: "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates Returns: A list of size batch_size, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, it can be ommitted. 
if self.use_focal_loss: out_prob = F.sigmoid(outputs["logits"].flatten(0, 1)) out_prob = out_prob[:, target_ids] neg_cost_class = (1 - self.alpha) * (out_prob**self.gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = self.alpha * ((1 - out_prob) ** self.gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class - neg_cost_class else: out_prob = outputs["logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] class_cost = -out_prob[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost betwen boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Compute the final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] class RTDetrLoss(nn.Module): """ This class computes the losses for RTDetr. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: matcher (`DetrHungarianMatcher`): Module able to compute a matching between targets and proposals. weight_dict (`Dict`): Dictionary relating each loss with its weights. These losses are configured in RTDetrConf as `weight_loss_vfl`, `weight_loss_bbox`, `weight_loss_giou` losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. alpha (`float`): Parameter alpha used to compute the focal loss. gamma (`float`): Parameter gamma used to compute the focal loss. eos_coef (`float`): Relative classification weight applied to the no-object category. num_classes (`int`): Number of object categories, omitting the special no-object category. 
""" def __init__(self, config): super().__init__() self.matcher = RTDetrHungarianMatcher(config) self.num_classes = config.num_labels self.weight_dict = { "loss_vfl": config.weight_loss_vfl, "loss_bbox": config.weight_loss_bbox, "loss_giou": config.weight_loss_giou, } self.losses = ["vfl", "boxes"] self.eos_coef = config.eos_coefficient empty_weight = torch.ones(config.num_labels + 1) empty_weight[-1] = self.eos_coef self.register_buffer("empty_weight", empty_weight) self.alpha = config.focal_loss_alpha self.gamma = config.focal_loss_gamma def loss_labels_vfl(self, outputs, targets, indices, num_boxes, log=True): if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") if "logits" not in outputs: raise KeyError("No predicted logits found in outputs") idx = self._get_source_permutation_idx(indices) src_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([_target["boxes"][i] for _target, (_, i) in zip(targets, indices)], dim=0) ious, _ = box_iou(center_to_corners_format(src_boxes), center_to_corners_format(target_boxes)) ious = torch.diag(ious).detach() src_logits = outputs["logits"] target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device ) target_classes[idx] = target_classes_original target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] target_score_original = torch.zeros_like(target_classes, dtype=src_logits.dtype) target_score_original[idx] = ious.to(target_score_original.dtype) target_score = target_score_original.unsqueeze(-1) * target pred_score = F.sigmoid(src_logits).detach() weight = self.alpha * pred_score.pow(self.gamma) * (1 - target) + target_score loss = F.binary_cross_entropy_with_logits(src_logits, target_score, weight=weight, reduction="none") loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {"loss_vfl": loss} def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (NLL) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") src_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device ) target_classes[idx] = target_classes_original loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.class_weight) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. 
""" logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) src_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) losses = {} loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none") losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(src_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. """ if "pred_masks" not in outputs: raise KeyError("No predicted masks found in outputs") source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs["pred_masks"] source_masks = source_masks[source_idx] masks = [t["masks"] for t in targets] target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] # upsample predictions to the target size source_masks = nn.functional.interpolate( source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False ) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes), "loss_dice": dice_loss(source_masks, target_masks, num_boxes), } return losses def loss_labels_bce(self, outputs, targets, indices, num_boxes, log=True): src_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device ) target_classes[idx] = target_classes_original target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] loss = F.binary_cross_entropy_with_logits(src_logits, target * 1.0, reduction="none") loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {"loss_bce": loss} def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx def _get_target_permutation_idx(self, indices): # permute targets 
following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx def loss_labels_focal(self, outputs, targets, indices, num_boxes, log=True): if "logits" not in outputs: raise KeyError("No logits found in outputs") src_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device ) target_classes[idx] = target_classes_original target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] loss = sigmoid_focal_loss(src_logits, target, self.alpha, self.gamma) loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {"loss_focal": loss} def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, "masks": self.loss_masks, "bce": self.loss_labels_bce, "focal": self.loss_labels_focal, "vfl": self.loss_labels_vfl, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) @staticmethod def get_cdn_matched_indices(dn_meta, targets): dn_positive_idx, dn_num_group = dn_meta["dn_positive_idx"], dn_meta["dn_num_group"] num_gts = [len(t["class_labels"]) for t in targets] device = targets[0]["class_labels"].device dn_match_indices = [] for i, num_gt in enumerate(num_gts): if num_gt > 0: gt_idx = torch.arange(num_gt, dtype=torch.int64, device=device) gt_idx = gt_idx.tile(dn_num_group) assert len(dn_positive_idx[i]) == len(gt_idx) dn_match_indices.append((dn_positive_idx[i], gt_idx)) else: dn_match_indices.append( ( torch.zeros(0, dtype=torch.int64, device=device), torch.zeros(0, dtype=torch.int64, device=device), ) ) return dn_match_indices def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. """ outputs_without_aux = {k: v for k, v in outputs.items() if "auxiliary_outputs" not in k} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) num_boxes = torch.clamp(num_boxes, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: l_dict = self.get_loss(loss, outputs, targets, indices, num_boxes) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} losses.update(l_dict) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. 
continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} l_dict = {k + f"_aux_{i}": v for k, v in l_dict.items()} losses.update(l_dict) # In case of cdn auxiliary losses. For rtdetr if "dn_auxiliary_outputs" in outputs: if "denoising_meta_values" not in outputs: raise ValueError( "The output must have the 'denoising_meta_values` key. Please, ensure that 'outputs' includes a 'denoising_meta_values' entry." ) indices = self.get_cdn_matched_indices(outputs["denoising_meta_values"], targets) num_boxes = num_boxes * outputs["denoising_meta_values"]["dn_num_group"] for i, auxiliary_outputs in enumerate(outputs["dn_auxiliary_outputs"]): # indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue kwargs = {} l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} l_dict = {k + f"_dn_{i}": v for k, v in l_dict.items()} losses.update(l_dict) return losses def RTDetrForObjectDetectionLoss( logits, labels, device, pred_boxes, config, outputs_class=None, outputs_coord=None, enc_topk_logits=None, enc_topk_bboxes=None, denoising_meta_values=None, **kwargs, ): criterion = RTDetrLoss(config) criterion.to(device) # Second: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes if config.auxiliary_loss: if denoising_meta_values is not None: dn_out_coord, outputs_coord = torch.split(outputs_coord, denoising_meta_values["dn_num_split"], dim=2) dn_out_class, outputs_class = torch.split(outputs_class, denoising_meta_values["dn_num_split"], dim=2) auxiliary_outputs = _set_aux_loss(outputs_class[:, :-1].transpose(0, 1), outputs_coord[:, :-1].transpose(0, 1)) outputs_loss["auxiliary_outputs"] = auxiliary_outputs outputs_loss["auxiliary_outputs"].extend(_set_aux_loss([enc_topk_logits], [enc_topk_bboxes])) if denoising_meta_values is not None: outputs_loss["dn_auxiliary_outputs"] = _set_aux_loss( dn_out_class.transpose(0, 1), dn_out_coord.transpose(0, 1) ) outputs_loss["denoising_meta_values"] = denoising_meta_values loss_dict = criterion(outputs_loss, labels) loss = sum(loss_dict.values()) return loss, loss_dict, auxiliary_outputs
transformers/src/transformers/loss/loss_rt_detr.py/0
{ "file_path": "transformers/src/transformers/loss/loss_rt_detr.py", "repo_id": "transformers", "token_count": 9354 }
91
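The `loss_rt_detr.py` file above wires a Hungarian matcher into a set of weighted losses (`vfl` and `boxes` by default). The sketch below shows how the criterion consumes raw predictions plus a list of per-image targets; it is illustrative only and assumes `scipy` is installed and that `RTDetrConfig`'s defaults provide the matcher costs and loss weights the criterion reads.

```python
import torch

from transformers import RTDetrConfig
from transformers.loss.loss_rt_detr import RTDetrLoss

config = RTDetrConfig(num_labels=91)  # assumed: defaults carry the matcher/loss hyper-parameters
criterion = RTDetrLoss(config)

batch_size, num_queries = 2, 30
outputs = {
    "logits": torch.randn(batch_size, num_queries, config.num_labels),
    "pred_boxes": torch.rand(batch_size, num_queries, 4),  # (cx, cy, w, h), normalized
}
targets = [
    {"class_labels": torch.tensor([1, 7]), "boxes": torch.rand(2, 4)},
    {"class_labels": torch.tensor([3]), "boxes": torch.rand(1, 4)},
]

loss_dict = criterion(outputs, targets)  # e.g. loss_vfl, loss_bbox, loss_giou
total_loss = sum(loss_dict.values())
```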
# coding=utf-8 # Copyright 2022 WenXiang ZhongzhiCheng LedellWu LiuGuang BoWenZhang The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Image/Text processor class for AltCLIP """ from typing import List, Union from ...image_utils import ImageInput from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput from ...utils.deprecation import deprecate_kwarg class AltClipProcessorKwargs(ProcessingKwargs, total=False): _defaults = {} class AltCLIPProcessor(ProcessorMixin): r""" Constructs a AltCLIP processor which wraps a CLIP image processor and a XLM-Roberta tokenizer into a single processor. [`AltCLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`XLMRobertaTokenizerFast`]. See the [`~AltCLIPProcessor.__call__`] and [`~AltCLIPProcessor.decode`] for more information. Args: image_processor ([`CLIPImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`XLMRobertaTokenizerFast`], *optional*): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "CLIPImageProcessor" tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") @deprecate_kwarg(old_name="feature_extractor", version="5.0.0", new_name="image_processor") def __init__(self, image_processor=None, tokenizer=None): if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) def __call__( self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, audio=None, videos=None, **kwargs: Unpack[AltClipProcessorKwargs], ) -> BatchEncoding: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to XLMRobertaTokenizerFast's [`~XLMRobertaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: images (`ImageInput`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`TextInput`, `PreTokenizedInput`, `List[TextInput]`, `List[PreTokenizedInput]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. 
Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if text is None and images is None: raise ValueError("You must specify either text or images.") output_kwargs = self._merge_kwargs( AltClipProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if text is not None: encoding = self.tokenizer(text, **output_kwargs["text_kwargs"]) if images is not None: image_features = self.image_processor(images, **output_kwargs["images_kwargs"]) # BC for explicit return_tensors if "return_tensors" in output_kwargs["common_kwargs"]: return_tensors = output_kwargs["common_kwargs"].pop("return_tensors", None) if text is not None and images is not None: encoding["pixel_values"] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to XLMRobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to XLMRobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) __all__ = ["AltCLIPProcessor"]
transformers/src/transformers/models/altclip/processing_altclip.py/0
{ "file_path": "transformers/src/transformers/models/altclip/processing_altclip.py", "repo_id": "transformers", "token_count": 2643 }
92
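For reference, a short usage sketch of the `AltCLIPProcessor` defined above. The checkpoint name and image URL are the ones commonly used in AltCLIP examples and are assumptions here, not part of the dumped file; any compatible repo works.

```python
import requests
from PIL import Image

from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(
    images=image,
    text=["a photo of two cats", "a photo of a dog"],
    padding=True,
    return_tensors="pt",
)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```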
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """AutoFeatureExtractor class.""" import importlib import json import os import warnings from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) logger = logging.get_logger(__name__) FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("clvp", "ClvpFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("dac", "DacFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mimi", "EncodecFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("moonshine", "Wav2Vec2FeatureExtractor"), ("moshi", "EncodecFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("pop2piano", "Pop2PianoFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("seamless_m4t", "SeamlessM4TFeatureExtractor"), ("seamless_m4t_v2", "SeamlessM4TFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), 
("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("univnet", "UnivNetFeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-bert", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def feature_extractor_class_from_name(class_name: str): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: module_name = model_type_to_module_name(module_name) module = importlib.import_module(f".{module_name}", "transformers.models") try: return getattr(module, class_name) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(extractor, "__name__", None) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. main_module = importlib.import_module("transformers") if hasattr(main_module, class_name): return getattr(main_module, class_name) return None def get_feature_extractor_config( pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: Optional[bool] = None, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, ): """ Loads the tokenizer configuration from a pretrained model tokenizer configuration. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. - a path to a *directory* containing a configuration file saved using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. <Tip> Passing `token=True` is required when you want to use a private model. </Tip> Returns: `Dict`: The configuration of the tokenizer. Examples: ```python # Download configuration from huggingface.co and cache. tokenizer_config = get_tokenizer_config("google-bert/bert-base-uncased") # This model does not have a tokenizer config so the result will be an empty dict. tokenizer_config = get_tokenizer_config("FacebookAI/xlm-roberta-base") # Save a pretrained tokenizer locally and you can reload its config from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") tokenizer.save_pretrained("tokenizer-test") tokenizer_config = get_tokenizer_config("tokenizer-test") ```""" use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token resolved_config_file = get_file_from_repo( pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, ) if resolved_config_file is None: logger.info( "Could not locate the feature extractor configuration file, will try to use the model config instead." ) return {} with open(resolved_config_file, encoding="utf-8") as reader: return json.load(reader) class AutoFeatureExtractor: r""" This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of the library when created with the [`AutoFeatureExtractor.from_pretrained`] class method. This class cannot be instantiated directly using `__init__()` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoFeatureExtractor is designed to be instantiated " "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES) def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate one of the feature extractor classes of the library from a pretrained model vocabulary. The feature extractor class to instantiate is selected based on the `model_type` property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: List options Params: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on huggingface.co. - a path to a *directory* containing a feature extractor file saved using the [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g., `./my_model_directory/`. - a path or url to a saved feature extractor JSON *file*, e.g., `./my_model_directory/preprocessor_config.json`. 
cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model feature extractor should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the feature extractor files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. return_unused_kwargs (`bool`, *optional*, defaults to `False`): If `False`, then this function returns just the final feature extractor object. If `True`, then this functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. kwargs (`Dict[str, Any]`, *optional*): The values in kwargs of any keys which are feature extractor attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is controlled by the `return_unused_kwargs` keyword parameter. <Tip> Passing `token=True` is required when you want to use a private model. </Tip> Examples: ```python >>> from transformers import AutoFeatureExtractor >>> # Download feature extractor from huggingface.co and cache. >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") >>> # If feature extractor files are in a directory (e.g. feature extractor was saved using *save_pretrained('./test/saved_model/')*) >>> # feature_extractor = AutoFeatureExtractor.from_pretrained("./test/saved_model/") ```""" use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if kwargs.get("token", None) is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
                )
            kwargs["token"] = use_auth_token

        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(
                    pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
                )
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} or {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class, exist_ok=False):
        """
        Register a new feature extractor for this class.

        Args:
            config_class ([`PretrainedConfig`]):
                The configuration corresponding to the model to register.
            feature_extractor_class ([`FeatureExtractorMixin`]):
                The feature extractor to register.
        """
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class, exist_ok=exist_ok)
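

# A minimal sketch of the `register` hook above: `from_pretrained` resolves the extractor
# class from the `feature_extractor_type` key in preprocessor_config.json, from an
# `AutoFeatureExtractor` entry in `auto_map` (remote code, when trusted), or from the model
# config's type via FEATURE_EXTRACTOR_MAPPING, which `register` extends. The `MyConfig` and
# `MyFeatureExtractor` names below are hypothetical placeholders, not classes that exist in
# the library.
from transformers import AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyFeatureExtractor(FeatureExtractionMixin):
    pass


# After registration, AutoFeatureExtractor resolves a MyConfig-typed model to MyFeatureExtractor.
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)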
transformers/src/transformers/models/auto/feature_extraction_auto.py/0
{ "file_path": "transformers/src/transformers/models/auto/feature_extraction_auto.py", "repo_id": "transformers", "token_count": 7831 }
93
# coding=utf-8
# Copyright 2023 The Suno AI Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BARK model configuration"""

from typing import Dict

from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings, logging
from ..auto import CONFIG_MAPPING, AutoConfig


logger = logging.get_logger(__name__)


BARK_SUBMODELCONFIG_START_DOCSTRING = """
    This is the configuration class to store the configuration of a [`{model}`]. It is used to instantiate the model
    according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Bark [suno/bark](https://huggingface.co/suno/bark)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        block_size (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        input_vocab_size (`int`, *optional*, defaults to 10_048):
            Vocabulary size of a Bark sub-model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`{model}`]. Defaults to 10_048, but should be chosen carefully with
            regard to the chosen sub-model.
        output_vocab_size (`int`, *optional*, defaults to 10_048):
            Output vocabulary size of a Bark sub-model. Defines the number of different tokens that can be represented
            by the `output_ids` when passing forward a [`{model}`]. Defaults to 10_048, but should be chosen carefully
            with regard to the chosen sub-model.
        num_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the given sub-model.
        num_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer architecture.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the hidden representations in the architecture.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use bias in the linear layers and layer norm layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
""" class BarkSubModelConfig(PretrainedConfig): keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", "vocab_size": "input_vocab_size", "window_size": "block_size", } def __init__( self, block_size=1024, input_vocab_size=10_048, output_vocab_size=10_048, num_layers=12, num_heads=12, hidden_size=768, dropout=0.0, bias=True, # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster initializer_range=0.02, use_cache=True, **kwargs, ): self.block_size = block_size self.input_vocab_size = input_vocab_size self.output_vocab_size = output_vocab_size self.num_layers = num_layers self.num_heads = num_heads self.hidden_size = hidden_size self.dropout = dropout self.bias = bias self.use_cache = use_cache self.initializer_range = initializer_range super().__init__(**kwargs) @add_start_docstrings( BARK_SUBMODELCONFIG_START_DOCSTRING.format(config="BarkSemanticConfig", model="BarkSemanticModel"), """ Example: ```python >>> from transformers import BarkSemanticConfig, BarkSemanticModel >>> # Initializing a Bark sub-module style configuration >>> configuration = BarkSemanticConfig() >>> # Initializing a model (with random weights) from the suno/bark style configuration >>> model = BarkSemanticModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""", ) class BarkSemanticConfig(BarkSubModelConfig): model_type = "semantic" base_config_key = "semantic_config" @add_start_docstrings( BARK_SUBMODELCONFIG_START_DOCSTRING.format(config="BarkCoarseConfig", model="BarkCoarseModel"), """ Example: ```python >>> from transformers import BarkCoarseConfig, BarkCoarseModel >>> # Initializing a Bark sub-module style configuration >>> configuration = BarkCoarseConfig() >>> # Initializing a model (with random weights) from the suno/bark style configuration >>> model = BarkCoarseModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""", ) class BarkCoarseConfig(BarkSubModelConfig): model_type = "coarse_acoustics" base_config_key = "coarse_acoustics_config" @add_start_docstrings( BARK_SUBMODELCONFIG_START_DOCSTRING.format(config="BarkFineConfig", model="BarkFineModel"), """ n_codes_total (`int`, *optional*, defaults to 8): The total number of audio codebooks predicted. Used in the fine acoustics sub-model. n_codes_given (`int`, *optional*, defaults to 1): The number of audio codebooks predicted in the coarse acoustics sub-model. Used in the acoustics sub-models. Example: ```python >>> from transformers import BarkFineConfig, BarkFineModel >>> # Initializing a Bark sub-module style configuration >>> configuration = BarkFineConfig() >>> # Initializing a model (with random weights) from the suno/bark style configuration >>> model = BarkFineModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""", ) class BarkFineConfig(BarkSubModelConfig): model_type = "fine_acoustics" base_config_key = "fine_acoustics_config" def __init__(self, tie_word_embeddings=True, n_codes_total=8, n_codes_given=1, **kwargs): self.n_codes_total = n_codes_total self.n_codes_given = n_codes_given super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) class BarkConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`BarkModel`]. It is used to instantiate a Bark model according to the specified sub-models configurations, defining the model architecture. 
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Bark
    [suno/bark](https://huggingface.co/suno/bark) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        semantic_config ([`BarkSemanticConfig`], *optional*):
            Configuration of the underlying semantic sub-model.
        coarse_acoustics_config ([`BarkCoarseConfig`], *optional*):
            Configuration of the underlying coarse acoustics sub-model.
        fine_acoustics_config ([`BarkFineConfig`], *optional*):
            Configuration of the underlying fine acoustics sub-model.
        codec_config ([`AutoConfig`], *optional*):
            Configuration of the underlying codec sub-model.

    Example:

    ```python
    >>> from transformers import (
    ...     BarkSemanticConfig,
    ...     BarkCoarseConfig,
    ...     BarkFineConfig,
    ...     BarkModel,
    ...     BarkConfig,
    ...     AutoConfig,
    ... )

    >>> # Initializing Bark sub-modules configurations.
    >>> semantic_config = BarkSemanticConfig()
    >>> coarse_acoustics_config = BarkCoarseConfig()
    >>> fine_acoustics_config = BarkFineConfig()
    >>> codec_config = AutoConfig.from_pretrained("facebook/encodec_24khz")

    >>> # Initializing a Bark module style configuration
    >>> configuration = BarkConfig.from_sub_model_configs(
    ...     semantic_config, coarse_acoustics_config, fine_acoustics_config, codec_config
    ... )

    >>> # Initializing a model (with random weights)
    >>> model = BarkModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "bark"
    sub_configs = {
        "semantic_config": BarkSemanticConfig,
        "coarse_acoustics_config": BarkCoarseConfig,
        "fine_acoustics_config": BarkFineConfig,
        "codec_config": AutoConfig,
    }

    def __init__(
        self,
        semantic_config: Dict = None,
        coarse_acoustics_config: Dict = None,
        fine_acoustics_config: Dict = None,
        codec_config: Dict = None,
        initializer_range=0.02,
        **kwargs,
    ):
        if semantic_config is None:
            semantic_config = {}
            logger.info("semantic_config is None. Initializing the semantic model with default values.")

        if coarse_acoustics_config is None:
            coarse_acoustics_config = {}
            logger.info("coarse_acoustics_config is None. Initializing the coarse model with default values.")

        if fine_acoustics_config is None:
            fine_acoustics_config = {}
            logger.info("fine_acoustics_config is None. Initializing the fine model with default values.")

        if codec_config is None:
            codec_config = {}
            logger.info("codec_config is None. Initializing the codec model with default values.")

        self.semantic_config = BarkSemanticConfig(**semantic_config)
        self.coarse_acoustics_config = BarkCoarseConfig(**coarse_acoustics_config)
        self.fine_acoustics_config = BarkFineConfig(**fine_acoustics_config)
        codec_model_type = codec_config["model_type"] if "model_type" in codec_config else "encodec"
        self.codec_config = CONFIG_MAPPING[codec_model_type](**codec_config)

        self.initializer_range = initializer_range

        super().__init__(**kwargs)

    @classmethod
    def from_sub_model_configs(
        cls,
        semantic_config: BarkSemanticConfig,
        coarse_acoustics_config: BarkCoarseConfig,
        fine_acoustics_config: BarkFineConfig,
        codec_config: PretrainedConfig,
        **kwargs,
    ):
        r"""
        Instantiate a [`BarkConfig`] (or a derived class) from bark sub-models configuration.
Returns: [`BarkConfig`]: An instance of a configuration object """ return cls( semantic_config=semantic_config.to_dict(), coarse_acoustics_config=coarse_acoustics_config.to_dict(), fine_acoustics_config=fine_acoustics_config.to_dict(), codec_config=codec_config.to_dict(), **kwargs, ) __all__ = ["BarkCoarseConfig", "BarkConfig", "BarkFineConfig", "BarkSemanticConfig"]
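

# A minimal usage sketch of the composition above, mirroring the docstring example; the
# `num_heads=16` override is illustrative only.
from transformers import AutoConfig, BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig

semantic_config = BarkSemanticConfig(num_heads=16)
coarse_acoustics_config = BarkCoarseConfig()
fine_acoustics_config = BarkFineConfig()
codec_config = AutoConfig.from_pretrained("facebook/encodec_24khz")

configuration = BarkConfig.from_sub_model_configs(
    semantic_config, coarse_acoustics_config, fine_acoustics_config, codec_config
)
# `attribute_map` in BarkSubModelConfig aliases the common Transformers names to the
# Bark-specific ones, so `num_attention_heads` reads the `num_heads` value set above.
assert configuration.semantic_config.num_attention_heads == 16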
transformers/src/transformers/models/bark/configuration_bark.py/0
{ "file_path": "transformers/src/transformers/models/bark/configuration_bark.py", "repo_id": "transformers", "token_count": 4331 }
94
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BigBird model.""" import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_big_bird import BigBirdConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/bigbird-roberta-base" _CONFIG_FOR_DOC = "BigBirdConfig" _TRIVIA_QA_MAPPING = { "big_bird_attention": "attention/self", "output_layer_norm": "output/LayerNorm", "attention_output": "attention/output/dense", "output": "output/dense", "self_attention_layer_norm": "attention/output/LayerNorm", "intermediate": "intermediate/dense", "word_embeddings": "bert/embeddings/word_embeddings", "position_embedding": "bert/embeddings/position_embeddings", "type_embeddings": "bert/embeddings/token_type_embeddings", "embeddings": "bert/embeddings", "layer_normalization": "output/LayerNorm", "layer_norm": "LayerNorm", "trivia_qa_head": "qa_classifier", "dense": "intermediate/dense", "dense_1": "qa_outputs", } def load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False): """Load tf checkpoints in a pytorch model.""" def load_tf_weights_bert(init_vars, tf_path): names = [] tf_weights = {} for name, shape in init_vars: array = tf.train.load_variable(tf_path, name) name = name.replace("bert/encoder/LayerNorm", "bert/embeddings/LayerNorm") logger.info(f"Loading TF weight {name} with shape {shape}") names.append(name) tf_weights[name] = array return names, tf_weights def load_tf_weights_trivia_qa(init_vars): names = [] tf_weights = {} for i, var in enumerate(init_vars): name_items = var.name.split("/") if "transformer_scaffold" in name_items[0]: layer_name_items = name_items[0].split("_") if len(layer_name_items) < 3: layer_name_items += [0] name_items[0] = f"bert/encoder/layer_{layer_name_items[2]}" name = "/".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[ :-2 ] # remove last :0 in variable if "self/attention/output" in name: name = name.replace("self/attention/output", "output") if i >= len(init_vars) - 2: name = name.replace("intermediate", "output") logger.info(f"Loading TF weight {name} with shape {var.shape}") array = var.value().numpy() names.append(name) 
                tf_weights[name] = array

        return names, tf_weights

    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")

    # Load weights from TF model
    init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path)

    if len(init_vars) <= 0:
        raise ValueError("Loaded trained variables cannot be empty.")

    pt_names = list(model.state_dict().keys())

    if is_trivia_qa:
        names, tf_weights = load_tf_weights_trivia_qa(init_vars)
    else:
        names, tf_weights = load_tf_weights_bert(init_vars, tf_path)

    for txt_name in names:
        array = tf_weights[txt_name]
        name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        pt_name = []
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
                pt_name.append("weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
                pt_name.append("bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
                pt_name.append("weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
                pt_name.append("classifier")
            elif scope_names[0] == "transform":
                pointer = getattr(pointer, "transform")
                pt_name.append("transform")
                if ("bias" in name) or ("kernel" in name):
                    pointer = getattr(pointer, "dense")
                    pt_name.append("dense")
                elif ("beta" in name) or ("gamma" in name):
                    pointer = getattr(pointer, "LayerNorm")
                    pt_name.append("LayerNorm")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                    pt_name.append(f"{scope_names[0]}")
                except AttributeError:
                    logger.info(f"Skipping {m_name}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
                pt_name.append(f"{num}")
        if m_name[-11:] == "_embeddings" or m_name == "embeddings":
            pointer = getattr(pointer, "weight")
            pt_name.append("weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape):
                # TF stores these attention kernels with the head dimension split out, so they are
                # permuted back before being reshaped to the 2D PyTorch Linear weight.
                if (
                    txt_name.endswith("attention/self/key/kernel")
                    or txt_name.endswith("attention/self/query/kernel")
                    or txt_name.endswith("attention/self/value/kernel")
                ):
                    array = array.transpose(1, 0, 2).reshape(pointer.shape)
                elif txt_name.endswith("attention/output/dense/kernel"):
                    array = array.transpose(0, 2, 1).reshape(pointer.shape)
                else:
                    array = array.reshape(pointer.shape)

            if pointer.shape != array.shape:
                raise ValueError(
                    f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}."
                )
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise
        pt_weight_name = ".".join(pt_name)
        logger.info(f"Initialize PyTorch weight {pt_weight_name} from {txt_name}.")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(txt_name, None)
        pt_names.remove(pt_weight_name)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
    logger.info(f"Weights not initialized in PyTorch model: {', '.join(pt_names)}.")
    return model


class BigBirdEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )
        # End copy

        self.rescale_embeddings = config.rescale_embeddings
        self.hidden_size = config.hidden_size

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        # Set token_type_ids to the registered buffer (all zeros) defined in the constructor, which usually happens
        # when token_type_ids is auto-generated; the registered buffer helps users trace the model without passing
        # token_type_ids and solves issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if self.rescale_embeddings:
            inputs_embeds = inputs_embeds * (self.hidden_size**0.5)

        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings

        position_embeddings = self.position_embeddings(position_ids)
        embeddings += position_embeddings

        embeddings = self.dropout(embeddings)
        embeddings = self.LayerNorm(embeddings)
        return embeddings


class BigBirdSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
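        # For example, with the bigbird-roberta-base values hidden_size=768 and
        # num_attention_heads=12, attention_head_size = 768 // 12 = 64 and
        # all_head_size = 12 * 64 = 768, so each projection below maps
        # hidden_size -> hidden_size.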
self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BigBirdModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BigBirdBlockSparseAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.max_seqlen = config.max_position_embeddings self.seed = seed if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.num_random_blocks = config.num_random_blocks self.block_size = config.block_size self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions=None, ): # Currently this `class` can't be used in decoder. 
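        # Expected mask shapes, as inferred from how they are consumed below
        # (bsz = batch size, seq = sequence length, bs = block_size):
        #   band_mask:            [bsz, 1, seq//bs - 4, bs, 3*bs] -> masks the sliding-window scores
        #   from_mask:            [bsz, 1, seq, 1]                -> masks padded query positions
        #   to_mask:              [bsz, 1, 1, seq]                -> masks padded key positions
        #   from/to_blocked_mask: [bsz, seq//bs, bs]              -> block views used for the random-attention mask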
        batch_size, seqlen, _ = hidden_states.size()
        to_seq_length = from_seq_length = seqlen
        from_block_size = to_block_size = self.block_size

        if from_seq_length % from_block_size != 0:
            raise ValueError("Query-side sequence length must be a multiple of the block size")

        if to_seq_length % to_block_size != 0:
            raise ValueError("Key/value-side sequence length must be a multiple of the block size")

        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))

        context_layer, attention_probs = self.bigbird_block_sparse_attention(
            query_layer,
            key_layer,
            value_layer,
            band_mask,
            from_mask,
            to_mask,
            from_blocked_mask,
            to_blocked_mask,
            self.num_attention_heads,
            self.num_random_blocks,
            self.attention_head_size,
            from_block_size,
            to_block_size,
            batch_size,
            from_seq_length,
            to_seq_length,
            seed=self.seed,
            plan_from_length=None,
            plan_num_rand_blocks=None,
            output_attentions=output_attentions,
        )

        context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs

    @staticmethod
    def torch_bmm_nd(inp_1, inp_2, ndim=None):
        """Fast nd matrix multiplication"""
        # faster replacement of torch.einsum ("bhqk,bhkd->bhqd")
        return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(
            inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])
        )

    @staticmethod
    def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):
        """Fast nd matrix multiplication with transpose"""
        # faster replacement of torch.einsum (bhqd,bhkd->bhqk)
        return torch.bmm(
            inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)
        ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))

    def bigbird_block_sparse_attention(
        self,
        query_layer,
        key_layer,
        value_layer,
        band_mask,
        from_mask,
        to_mask,
        from_blocked_mask,
        to_blocked_mask,
        n_heads,
        n_rand_blocks,
        attention_head_size,
        from_block_size,
        to_block_size,
        batch_size,
        from_seq_len,
        to_seq_len,
        seed,
        plan_from_length,
        plan_num_rand_blocks,
        output_attentions,
    ):
        # BigBird block-sparse attention as suggested in paper

        # ITC:
        #     global tokens: 2 x block_size
        #     window tokens: 3 x block_size
        #     random tokens: num_rand_tokens x block_size

        # ETC:
        #     global tokens: extra_globals_tokens + 2 x block_size
        #     window tokens: 3 x block_size
        #     random tokens: num_rand_tokens x block_size

        # Note:
        #     1) Currently, ETC is not supported.
        #     2) Window size is fixed to 3 blocks & it can be changed only by
        #     changing `block_size`.
        #     3) Number of global blocks is fixed (2 blocks here) & global tokens can be
        #     controlled only by `block_size`.

        # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use the special trick
        # of shifting tokens (for calculating sliding attention); hence the following code can be divided into 5 parts.
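        # Worked example with illustrative numbers: seq_len = 512 and block_size = 64 give
        # 8 blocks per sequence. Part 1 is q[0] and part 5 is q[-1] (both global, attending
        # to all 512 keys); parts 2 and 4 are q[1] and q[-2]; part 3 covers the 8 - 4 = 4
        # middle blocks, each attending to 3 sliding, n_rand_blocks random, and 2 global
        # key blocks.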
        if from_seq_len // from_block_size != to_seq_len // to_block_size:
            raise ValueError("Error: the number of blocks needs to be the same!")

        rsqrt_d = 1 / math.sqrt(attention_head_size)
        bsz = batch_size
        attn_mask_penalty = -10000.0

        # generate random attention and corresponding masks
        np.random.seed(seed)
        if from_seq_len in [1024, 3072, 4096]:  # old plans used in paper
            rand_attn = [
                self._bigbird_block_rand_mask(
                    self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024
                )[: (from_seq_len // from_block_size - 2)]
                for _ in range(n_heads)
            ]
        else:
            if plan_from_length is None:
                plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(
                    from_seq_len, from_block_size, n_rand_blocks
                )

            rand_attn = self._bigbird_block_rand_mask_with_head(
                from_seq_length=from_seq_len,
                to_seq_length=to_seq_len,
                from_block_size=from_block_size,
                to_block_size=to_block_size,
                num_heads=n_heads,
                plan_from_length=plan_from_length,
                plan_num_rand_blocks=plan_num_rand_blocks,
            )

        rand_attn = np.stack(rand_attn, axis=0)
        rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)
        rand_attn.unsqueeze_(0)
        rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)

        rand_mask = self._create_rand_mask_from_inputs(
            from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size
        )

        blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)
        blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
        blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)

        # preparing block for randn attn
        gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)
        gathered_key = gathered_key.view(
            bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
        )  # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
        gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)
        gathered_value = gathered_value.view(
            bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
        )  # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]

        # 1st PART
        # 1st block (global block) attention scores
        # q[0] x (k[0], k[1], k[2], k[3], k[4] ....
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4) first_product = first_product * rsqrt_d first_product += (1.0 - to_mask) * attn_mask_penalty first_attn_weights = nn.functional.softmax( first_product, dim=-1 ) # [bsz, n_heads, from_block_size, to_seq_len] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4) first_context_layer.unsqueeze_(2) # 2nd PART # 2nd block attention scores # q[1] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> 2nd, 3rd blocks # global key blocks -> 1st block second_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] second_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4) second_seq_pad = torch.cat( [ to_mask[:, :, :, : 3 * to_block_size], to_mask[:, :, :, -to_block_size:], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, 0], ], dim=3, ) second_product = second_product * rsqrt_d second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty second_attn_weights = nn.functional.softmax( second_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4) second_context_layer.unsqueeze_(2) # 3rd PART # Middle blocks attention scores # q[-2:2] x (sliding_keys, random_keys, global_keys) # sliding attn is calculated using special trick of shifting tokens as discussed in paper # random keys are generated by taking random indices as per `rand_attn` # global keys -> 1st & last block exp_blocked_key_matrix = torch.cat( [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3 ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] exp_blocked_value_matrix = torch.cat( [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]], dim=3, ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] middle_query_matrix = blocked_query_matrix[:, :, 2:-2] # sliding attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, 
from_block_size, 3*to_block_size] inner_band_product = inner_band_product * rsqrt_d # randn attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] rand_band_product = rand_band_product * rsqrt_d # Including 1st block (since it's global) first_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] first_band_product = first_band_product * rsqrt_d # Including last block (since it's global) last_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] last_band_product = last_band_product * rsqrt_d # masking padded tokens inner_band_product += (1.0 - band_mask) * attn_mask_penalty first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty # completing attention scores matrix for all q[-2:2] band_product = torch.cat( [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # safely doing softmax since attention matrix is completed attn_weights = nn.functional.softmax( band_product, dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # contribution of sliding keys # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] context_layer = self.torch_bmm_nd( attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of random keys # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] context_layer += self.torch_bmm_nd( attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of global keys context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> 
[bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # 4th PART # last 2nd token attention scores # q[-2] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> last 3 blocks # global key block -> 1st block # random key block -> based on indices stored in `randn_attn` second_last_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1] second_last_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+r)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4) second_last_seq_pad = torch.cat( [ to_mask[:, :, :, :to_block_size], to_mask[:, :, :, -3 * to_block_size :], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_last_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, -1], ], dim=3, ) second_last_product = second_last_product * rsqrt_d second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty second_last_attn_weights = nn.functional.softmax( second_last_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4) second_last_context_layer.unsqueeze_(2) # 5th PART # last block (global) attention scores # q[-1] x (k[0], k[1], k[2], k[3], .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4) last_product = last_product * rsqrt_d last_product += (1.0 - to_mask) * attn_mask_penalty last_attn_weights = nn.functional.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4) last_context_layer.unsqueeze_(2) # combining representations of all tokens context_layer = torch.cat( [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer], dim=2, ) context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask context_layer = torch.transpose(context_layer, 1, 2) # this is just for visualizing; forward pass doesn't depend on following code if output_attentions: # TODO(PVP): need to verify if below code is correct attention_probs = torch.zeros( bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device ) # 1st query block # corresponding to `first_context_layer` attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global # 2nd query block # corresponding to `second_context_layer` attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[ :, :, :, : 3 * to_block_size ] # 1st three key blocks (global + sliding) attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[ :, :, :, 3 * to_block_size : 4 * to_block_size ] # last key block (global) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Middle query blocks # corresponding to `context_layer` # sliding keys for q_idx in range(from_seq_len // from_block_size - 4): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, )[:, :, 2:-2, :, 1:-1, :] right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size] attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view( bsz, n_heads, from_block_size, 3, to_block_size ) # inner_band_product # global keys (corresponding to 1st key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[ :, :, :, :, :to_block_size ].view(bsz, n_heads, -1, to_block_size) # first_band_product # global keys (corresponding to last key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[ :, :, :, :, -to_block_size: ].view(bsz, n_heads, -1, to_block_size) # last_band_product # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. 
following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads for q_idx in range(1, len(i2) - 1): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size] attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Second-last query block # corresponding to `second_last_context_layer` attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[ :, :, :, :to_block_size ] # 1st key block (global) attention_probs[:, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :] = ( second_last_attn_weights[:, :, :, to_block_size : 4 * to_block_size] ) # last three blocks (global + sliding) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # last query block # corresponding to `last_context_layer` attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global else: attention_probs = None return context_layer, attention_probs @staticmethod def torch_gather_b2(params, indices): # this operation is equivalent to tf.gather when batch_dims=2 if params.shape[:2] != indices.shape[:2]: raise ValueError( "Make sure that the first two dimensions of params and indices are identical, but" f" they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}" ) num_indices_to_gather = indices.shape[-2] * indices.shape[-1] num_indices_to_pick_from = params.shape[2] shift = torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device) indices_shift = torch.div(shift, num_indices_to_gather, rounding_mode="floor") * num_indices_to_pick_from flattened_indices = indices.view(-1) + indices_shift flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1]) out_flattened = flattened_params.index_select(0, flattened_indices) out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:]) return out @staticmethod def _create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size, ): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_rand_blocks: int. Number of random chunks per row. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. 
Returns: float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2, from_block_size, num_rand_blocks*to_block_size]. """ num_windows = from_seq_length // from_block_size - 2 rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)]) rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size) rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): """ Gives the plan of where to put random attention. Args: from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. num_rand_blocks: int. Number of random chunks per row. Returns: plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for each block """ plan_from_length = [] plan_num_rand_blocks = [] if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif (num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2)) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return plan_from_length, plan_num_rand_blocks def _bigbird_block_rand_mask( self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1 ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_rand_blocks: int. Number of random chunks per row. last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence, if positive then num_rand_blocks blocks chosen only up to last_idx. 
        Returns:
            adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks
        """
        # using this method when from_seq_length in [1024, 3072, 4096]

        if from_seq_length // from_block_size != to_seq_length // to_block_size:
            raise ValueError("Error: the number of blocks needs to be the same!")

        rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
        # During inference (eval) no randomness
        if not self.training:
            return rand_attn
        middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
        last = to_seq_length // to_block_size - 1
        if last_idx > (2 * to_block_size):
            last = (last_idx // to_block_size) - 1

        r = num_rand_blocks  # shorthand
        for i in range(1, from_seq_length // from_block_size - 1):
            start = i - 2
            end = i
            if i == 1:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]
            elif i == 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]
            elif i == from_seq_length // from_block_size - 3:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
            # Missing -3: should have been sliced till last-3
            elif i == from_seq_length // from_block_size - 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
            # Missing -4: should have been sliced till last-4
            else:
                if start > last:
                    start = last
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                elif (end + 1) == last:
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                else:
                    rand_attn[i - 1, :] = np.random.permutation(
                        np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))
                    )[:r]
        return rand_attn

    def _bigbird_block_rand_mask_with_head(
        self,
        from_seq_length,
        to_seq_length,
        from_block_size,
        to_block_size,
        num_heads,
        plan_from_length,
        plan_num_rand_blocks,
        window_block_left=1,
        window_block_right=1,
        global_block_top=1,
        global_block_bottom=1,
        global_block_left=1,
        global_block_right=1,
    ):
        """
        Create adjacency list of random attention.

        Args:
            from_seq_length: int. length of from sequence.
            to_seq_length: int. length of to sequence.
            from_block_size: int. size of block in from sequence.
            to_block_size: int. size of block in to sequence.
            num_heads: int. total number of heads.
            plan_from_length: list. plan from length where num_random_blocks are chosen from.
            plan_num_rand_blocks: list. number of rand blocks within the plan.
            window_block_left: int. number of blocks of window to left of a block.
            window_block_right: int. number of blocks of window to right of a block.
            global_block_top: int. number of blocks at the top.
            global_block_bottom: int. number of blocks at the bottom.
            global_block_left: int. Number of blocks globally used to the left.
            global_block_right: int. Number of blocks globally used to the right.
        Returns:
            adjacency list of size num_heads where each element is of size
            from_seq_length//from_block_size-2 by num_rand_blocks
        """
        # using this method when from_seq_length not in [1024, 3072, 4096]

        if from_seq_length // from_block_size != to_seq_length // to_block_size:
            raise ValueError("Error: the number of blocks needs to be the same!")

        if from_seq_length not in plan_from_length:
            raise ValueError("Error: from sequence length not in plan!")

        # Total number of blocks in the mask
        num_blocks = from_seq_length // from_block_size
        # Number of blocks per plan
        plan_block_length = np.array(plan_from_length) // from_block_size
        # till when to follow plan
        max_plan_idx = plan_from_length.index(from_seq_length)

        # Random Attention adjacency list
        rand_attn = [
            np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)
            for i in range(num_heads)
        ]
        # During inference (eval) no randomness
        if not self.training:
            for nh in range(num_heads):
                rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]
            return rand_attn

        # We will go iteratively over the plan blocks and pick random number of
        # Attention blocks from the legally allowed blocks
        for plan_idx in range(max_plan_idx + 1):
            rnd_r_cnt = 0
            if plan_idx > 0:
                # set the row for all from_blocks starting from 0 to
                # plan_block_length[plan_idx-1]
                # column indices start from plan_block_length[plan_idx-1] and end at
                # plan_block_length[plan_idx]
                if plan_num_rand_blocks[plan_idx] > 0:
                    rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                    curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
                    for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=plan_block_length[plan_idx - 1],
                                to_end_block_id=plan_block_length[plan_idx],
                                num_rand_blocks=plan_num_rand_blocks[plan_idx],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )

                for pl_id in range(plan_idx):
                    if plan_num_rand_blocks[pl_id] == 0:
                        continue
                    for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):
                        rnd_r_cnt = 0
                        to_start_block_id = 0
                        if pl_id > 0:
                            rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))
                            to_start_block_id = plan_block_length[pl_id - 1]
                        curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=to_start_block_id,
                                to_end_block_id=plan_block_length[pl_id],
                                num_rand_blocks=plan_num_rand_blocks[pl_id],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )

            if plan_num_rand_blocks[plan_idx] == 0:
                continue
            curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
            from_start_block_id = global_block_top
            to_start_block_id = 0
            if plan_idx > 0:
                rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                from_start_block_id = plan_block_length[plan_idx - 1]
                to_start_block_id = plan_block_length[plan_idx - 1]

            for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):
                for h in range(num_heads):
                    rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                        block_id=blk_rw_idx,
                        to_start_block_id=to_start_block_id,
                        to_end_block_id=plan_block_length[plan_idx],
num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention( block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1, ): """ For a single row block get random row attention. Args: block_id: int. block id of row. to_start_block_id: int. random attention column start id. to_end_block_id: int. random attention column end id. num_rand_blocks: int. number of random blocks to be selected. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. Returns: row containing the random attention vector of size num_rand_blocks. """ # list of to_blocks from which to choose random attention to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32) # permute the blocks perm_block = np.random.permutation(to_block_list) # illegal blocks for the current block id, using window illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) # Add blocks at the start and at the end illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) # The second from_block cannot choose random attention on second last to_block if block_id == 1: illegal_blocks.append(to_end_block_id - 2) # The second last from_block cannot choose random attention on second to_block if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blokcs = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blokcs.append(perm_block[i]) if len(selected_random_blokcs) == num_rand_blocks: break return np.array(selected_random_blokcs, dtype=np.int32) # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird class BigBirdSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BigBirdAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.attention_type = config.attention_type self.config = config self.seed = seed if self.config.attention_type == "original_full": self.self = BigBirdSelfAttention(config) elif self.config.attention_type == "block_sparse": self.self = BigBirdBlockSparseAttention(config, seed) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}" ) self.output = BigBirdSelfOutput(config) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type 
can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value if value == "original_full": # copy all weights to new full attention class attn_weights = BigBirdSelfAttention(self.config) else: # copy all weights to new sparse attention class attn_weights = BigBirdBlockSparseAttention(self.config, self.seed) attn_weights.query = self.self.query attn_weights.value = self.self.value attn_weights.key = self.self.key self.self = attn_weights self.attention_type = value if not self.training: self.self.eval() def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, # block_sparse config band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, ): # fp16 compatibility if band_mask is not None: band_mask = band_mask.to(hidden_states.dtype) if from_mask is not None: from_mask = from_mask.to(hidden_states.dtype) if to_mask is not None: to_mask = to_mask.to(hidden_states.dtype) if self.attention_type == "original_full": self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: if encoder_hidden_states is not None: raise ValueError("BigBird cannot be used as a decoder when config.attention_type != 'original_full'") self_outputs = self.self( hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird class BigBirdIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird class BigBirdOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BigBirdLayer(nn.Module): def __init__(self, config, seed=None): super().__init__() self.config = config self.attention_type = config.attention_type self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BigBirdAttention(config, seed=seed) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise TypeError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = BigBirdAttention(config) 
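            # NOTE: the cross-attention module is kept in sync with the layer's `attention_type`
            # via `set_attention_type` below. BigBird only supports cross-attention together with
            # `attention_type="original_full"`; `BigBirdModel.__init__` enforces this whenever
            # `config.add_cross_attention` is set.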
self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.attention.set_attention_type(value) if self.add_cross_attention: self.crossattention.set_attention_type(value) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, band_mask=None, from_mask=None, to_mask=None, blocked_encoder_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=blocked_encoder_mask, to_blocked_mask=blocked_encoder_mask, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " " cross-attention layers by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BigBirdEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.attention_type = config.attention_type self.layer = nn.ModuleList( [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( 
f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value for layer in self.layer: layer.set_attention_type(value) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, band_mask=None, from_mask=None, to_mask=None, blocked_encoder_mask=None, return_dict=True, ) -> Union[BaseModelOutputWithPastAndCrossAttentions, Tuple]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BigBird class BigBirdPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird class BigBirdLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = 
BigBirdPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird class BigBirdOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird class BigBirdOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird class BigBirdPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BigBirdPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BigBirdConfig load_tf_weights = load_tf_weights_in_big_bird base_model_prefix = "bert" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) BIG_BIRD_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BigBirdConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BIG_BIRD_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. 
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @dataclass class BigBirdForPreTrainingOutput(ModelOutput): """ Output type of [`BigBirdForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class BigBirdForQuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of question answering models.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        pooler_output (`torch.FloatTensor` of shape `(batch_size, 1)`):
            Pooler output from the BigBird model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    start_logits: torch.FloatTensor = None
    end_logits: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@add_start_docstrings(
    "The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.",
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdModel(BigBirdPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
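
    Example (a minimal usage sketch, not an official doc sample; the checkpoint name and the short input below are
    only illustrative, and a short input makes the model fall back to "original_full" attention with a warning):

    ```python
    >>> import torch
    >>> from transformers import AutoTokenizer, BigBirdModel

    >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
    >>> model = BigBirdModel.from_pretrained("google/bigbird-roberta-base")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    >>> with torch.no_grad():
    ...     outputs = model(**inputs)
    >>> last_hidden_state = outputs.last_hidden_state  # shape (batch_size, sequence_length, hidden_size)
    ```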
""" def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.attention_type = self.config.attention_type self.config = config self.block_size = self.config.block_size self.embeddings = BigBirdEmbeddings(config) self.encoder = BigBirdEncoder(config) if add_pooling_layer: self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() else: self.pooler = None self.activation = None if self.attention_type != "original_full" and config.add_cross_attention: logger.warning( "When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. Setting" " `attention_type=original_full`" ) self.set_attention_type("original_full") # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.encoder.set_attention_type(value) @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, Tuple[torch.FloatTensor]]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # in order to use block_sparse attention, sequence_length has to be at least # bigger than all global attentions: 2 * block_size # + sliding tokens: 3 * block_size # + random tokens: 2 * num_random_blocks * block_size max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size if self.attention_type == "block_sparse" and seq_length <= max_tokens_to_attend: # change attention_type from block_sparse to original_full sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) logger.warning( "Attention type 'block_sparse' is not possible if sequence_length: " f"{sequence_length} <= num global tokens: 2 * config.block_size " "+ min. num sliding tokens: 3 * config.block_size " "+ config.num_random_blocks * config.block_size " "+ additional buffer: config.num_random_blocks * config.block_size " f"= {max_tokens_to_attend} with config.block_size " f"= {self.config.block_size}, config.num_random_blocks " f"= {self.config.num_random_blocks}. " "Changing attention type to 'original_full'..." 
) self.set_attention_type("original_full") if self.attention_type == "block_sparse": ( padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, ) = self._pad_to_block_size( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id, ) else: padding_len = 0 if self.attention_type == "block_sparse": blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn( attention_mask, self.block_size ) extended_attention_mask = None elif self.attention_type == "original_full": blocked_encoder_mask = None band_mask = None from_mask = None to_mask = None # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.attention_type}" ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, blocked_encoder_mask=blocked_encoder_mask, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None # undo padding if padding_len > 0: # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1) sequence_output = sequence_output[:, :-padding_len] if not return_dict: return (sequence_output, pooler_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooler_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @staticmethod def create_masks_for_block_sparse_attn(attention_mask: 
torch.Tensor, block_size: int): batch_size, seq_length = attention_mask.size() if seq_length % block_size != 0: raise ValueError( f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block" f" size is {block_size}." ) def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. Returns: float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size, 3*to_block_size]. """ exp_blocked_to_pad = torch.cat( [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2 ) band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad) band_mask.unsqueeze_(1) return band_mask blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) from_mask = attention_mask.view(batch_size, 1, seq_length, 1) to_mask = attention_mask.view(batch_size, 1, 1, seq_length) return blocked_encoder_mask, band_mask, from_mask, to_mask def _pad_to_block_size( self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor, position_ids: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int, ): """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention.""" # padding block_size = self.config.block_size input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape batch_size, seq_len = input_shape[:2] padding_len = (block_size - seq_len % block_size) % block_size if padding_len > 0: logger.warning_once( f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of " f"`config.block_size`: {block_size}" ) if input_ids is not None: input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id) if position_ids is not None: # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id) if inputs_embeds is not None: input_ids_padding = inputs_embeds.new_full( (batch_size, padding_len), self.config.pad_token_id, dtype=torch.long, ) inputs_embeds_padding = self.embeddings(input_ids_padding) inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) attention_mask = nn.functional.pad( attention_mask, (0, padding_len), value=False ) # no attention on the padding tokens token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0 return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds class BigBirdForPreTraining(BigBirdPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config, add_pooling_layer=True) self.cls = BigBirdPreTrainingHeads(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias 
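    # NOTE: the MLM decoder weights are shared with the input word embeddings (see the comment in
    # `BigBirdLMPredictionHead` and `_tied_weights_keys` above), so `set_output_embeddings` also
    # re-points `cls.predictions.bias` to the bias of the new decoder module.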
@add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, next_sentence_label: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BigBirdForPreTrainingOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be added to masked_lm loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Used to hide legacy arguments that have been deprecated. Returns: Example: ```python >>> from transformers import AutoTokenizer, BigBirdForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.prediction_logits >>> seq_relationship_logits = outputs.seq_relationship_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None: loss_fct = CrossEntropyLoss() total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if next_sentence_label is not None and total_loss is not None: next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = total_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return BigBirdForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings("""BigBird Model with a `language modeling` head on top.""", BIG_BIRD_START_DOCSTRING) class 
BigBirdForMaskedLM(BigBirdPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForMaskedLM >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForMaskedLM.from_pretrained("google/bigbird-roberta-base") >>> squad_ds = load_dataset("rajpurkar/squad_v2", split="train") # doctest: +IGNORE_RESULT >>> # select random long article >>> LONG_ARTICLE_TARGET = squad_ds[81514]["context"] >>> # select random sentence >>> LONG_ARTICLE_TARGET[332:398] 'the highest values are very close to the theoretical maximum value' >>> # add mask_token >>> LONG_ARTICLE_TO_MASK = LONG_ARTICLE_TARGET.replace("maximum", "[MASK]") >>> inputs = tokenizer(LONG_ARTICLE_TO_MASK, return_tensors="pt") >>> # long article input >>> list(inputs["input_ids"].shape) [1, 919] >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) 'maximum' ``` ```python >>> labels = tokenizer(LONG_ARTICLE_TARGET, return_tensors="pt")["input_ids"] >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) 1.99 ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token if self.config.pad_token_id is None: raise ValueError("The PAD token should be defined for generation") attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask} @add_start_docstrings( """BigBird Model with a `language modeling` head on top for CLM fine-tuning.""", BIG_BIRD_START_DOCSTRING ) class BigBirdForCausalLM(BigBirdPreTrainedModel, GenerationMixin): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`") self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, 
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[CausalLMOutputWithCrossAttentions, Tuple[torch.FloatTensor]]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
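
        Example (an illustrative sketch; `google/bigbird-roberta-base` is an encoder checkpoint loaded here with
        `is_decoder=True` purely for demonstration, so the resulting loss and logits are not meaningful):

        ```python
        >>> from transformers import AutoTokenizer, BigBirdForCausalLM

        >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
        >>> model = BigBirdForCausalLM.from_pretrained("google/bigbird-roberta-base", is_decoder=True)

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs, labels=inputs["input_ids"])
        >>> loss = outputs.loss
        >>> logits = outputs.logits  # shape (batch_size, sequence_length, vocab_size)
        ```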
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past class BigBirdClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", BIG_BIRD_START_DOCSTRING, ) class BigBirdForSequenceClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = BigBirdModel(config) self.classifier = BigBirdClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForSequenceClassification >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("l-yohai/bigbird-roberta-base-mnli") >>> model = BigBirdForSequenceClassification.from_pretrained("l-yohai/bigbird-roberta-base-mnli") >>> squad_ds = load_dataset("rajpurkar/squad_v2", split="train") # doctest: +IGNORE_RESULT >>> LONG_ARTICLE = squad_ds[81514]["context"] >>> inputs = tokenizer(LONG_ARTICLE, return_tensors="pt") >>> # long input article >>> list(inputs["input_ids"].shape) [1, 919] >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> model.config.id2label[predicted_class_id] 'LABEL_0' ``` ```python >>> num_labels = len(model.config.id2label) >>> model = BigBirdForSequenceClassification.from_pretrained( ... "l-yohai/bigbird-roberta-base-mnli", num_labels=num_labels ... 
) >>> labels = torch.tensor(1) >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) 1.13 ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, BIG_BIRD_START_DOCSTRING, ) class BigBirdForMultipleChoice(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MultipleChoiceModelOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, BIG_BIRD_START_DOCSTRING, ) class BigBirdForTokenClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BigBirdModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class BigBirdForQuestionAnsweringHead(nn.Module): """Head for question answering tasks.""" def __init__(self, config): super().__init__() self.dropout = nn.Dropout(config.hidden_dropout_prob) self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_output): hidden_states = self.dropout(encoder_output) hidden_states = self.intermediate(hidden_states) hidden_states = self.output(hidden_states, encoder_output) hidden_states = self.qa_outputs(hidden_states) return hidden_states @add_start_docstrings( """ BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, BIG_BIRD_START_DOCSTRING, ) class BigBirdForQuestionAnswering(BigBirdPreTrainedModel): def __init__(self, config, add_pooling_layer=False): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.sep_token_id = config.sep_token_id self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer) self.qa_classifier = BigBirdForQuestionAnsweringHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BigBirdForQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, question_lengths: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BigBirdForQuestionAnsweringModelOutput, Tuple[torch.FloatTensor]]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForQuestionAnswering >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForQuestionAnswering.from_pretrained("google/bigbird-roberta-base") >>> squad_ds = load_dataset("rajpurkar/squad_v2", split="train") # doctest: +IGNORE_RESULT >>> # select random article and question >>> LONG_ARTICLE = squad_ds[81514]["context"] >>> QUESTION = squad_ds[81514]["question"] >>> QUESTION 'During daytime how high can the temperatures reach?' >>> inputs = tokenizer(QUESTION, LONG_ARTICLE, return_tensors="pt") >>> # long article and question input >>> list(inputs["input_ids"].shape) [1, 929] >>> with torch.no_grad(): ... outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> predict_answer_token_ids = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> predict_answer_token = tokenizer.decode(predict_answer_token_ids) ``` ```python >>> target_start_index, target_end_index = torch.tensor([130]), torch.tensor([132]) >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) >>> loss = outputs.loss ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) if question_lengths is None and input_ids is not None: # assuming input_ids format: <cls> <question> <sep> context <sep> question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1 question_lengths.unsqueeze_(1) logits_mask = None if question_lengths is not None: # setting lengths logits to `-inf` logits_mask = self.prepare_question_mask(question_lengths, seqlen) if token_type_ids is None: token_type_ids = torch.ones(logits_mask.size(), dtype=int, device=logits_mask.device) - logits_mask logits_mask = logits_mask logits_mask[:, 0] = False logits_mask.unsqueeze_(2) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_classifier(sequence_output) if logits_mask is not None: # removing question tokens from the competition logits = logits - logits_mask * 1e6 start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct 
= CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return BigBirdForQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @staticmethod def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int): # q_lengths -> (bz, 1) mask = torch.arange(0, maxlen).to(q_lengths.device) mask.unsqueeze_(0) # -> (1, maxlen) mask = torch.where(mask < q_lengths, 1, 0) return mask __all__ = [ "BigBirdForCausalLM", "BigBirdForMaskedLM", "BigBirdForMultipleChoice", "BigBirdForPreTraining", "BigBirdForQuestionAnswering", "BigBirdForSequenceClassification", "BigBirdForTokenClassification", "BigBirdLayer", "BigBirdModel", "BigBirdPreTrainedModel", "load_tf_weights_in_big_bird", ]
transformers/src/transformers/models/big_bird/modeling_big_bird.py/0
{ "file_path": "transformers/src/transformers/models/big_bird/modeling_big_bird.py", "repo_id": "transformers", "token_count": 64698 }
95
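The sequence-classification and question-answering heads in the file above ship with doctest examples, but `BigBirdForMultipleChoice` does not. The sketch below illustrates the expected `(batch_size, num_choices, sequence_length)` input layout; it assumes the public `google/bigbird-roberta-base` checkpoint, whose multiple-choice head is randomly initialized, so the printed loss and logits are illustrative only.

```python
import torch
from transformers import AutoTokenizer, BigBirdForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
model = BigBirdForMultipleChoice.from_pretrained("google/bigbird-roberta-base")

prompt = "In Italy, pizza served in formal settings is presented unsliced."
choice0 = "It is eaten with a fork and a knife."
choice1 = "It is eaten while held in the hand."

# Encode each (prompt, choice) pair separately, then add a leading batch dimension
# so every tensor has shape (batch_size=1, num_choices=2, sequence_length).
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}

labels = torch.tensor(0).unsqueeze(0)  # treat choice0 as the correct answer
outputs = model(**inputs, labels=labels)

print(outputs.logits.shape)  # torch.Size([1, 2]) -- one score per choice
print(outputs.loss)          # cross-entropy over the two choices
```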
# coding=utf-8 # Copyright 2021, The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast tokenization class for BlenderbotSmall.""" from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library). Args: vocab_file (`str`): Path to the vocabulary file. """ vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = BlenderbotSmallTokenizer def __init__( self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs, ): super().__init__( ByteLevelBPETokenizer( vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, ) self.add_prefix_space = add_prefix_space def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] __all__ = ["BlenderbotSmallTokenizerFast"]
transformers/src/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py/0
{ "file_path": "transformers/src/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py", "repo_id": "transformers", "token_count": 1422 }
96
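A small sketch of the special-token helpers defined in the tokenizer above. The `vocab.json` and `merges.txt` paths are placeholders for an existing byte-level BPE vocabulary (nothing here creates or downloads them), and the token ids are made up purely to show the layout of the outputs.

```python
from transformers import BlenderbotSmallTokenizerFast

# Placeholder paths: point these at a real byte-level BPE vocab/merges pair.
tokenizer = BlenderbotSmallTokenizerFast(vocab_file="vocab.json", merges_file="merges.txt")

ids_a = [10, 11, 12]  # made-up token ids for sequence A
ids_b = [20, 21]      # made-up token ids for sequence B

# Single sequence:  <bos> A <eos>
# Sequence pair  :  <bos> A <eos> <eos> B <eos>
print(tokenizer.build_inputs_with_special_tokens(ids_a))
print(tokenizer.build_inputs_with_special_tokens(ids_a, ids_b))

# BlenderbotSmall does not use token type ids, so this is always a list of zeros.
print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))
```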
# coding=utf-8 # Copyright 2022 the Big Science Workshop and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Bloom configuration""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging logger = logging.get_logger(__name__) class BloomConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`BloomModel`]. It is used to instantiate a Bloom model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to the Bloom architecture [bigscience/bloom](https://huggingface.co/bigscience/bloom). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 250880): Vocabulary size of the Bloom model. Defines the maximum number of different tokens that can be represented by the `inputs_ids` passed when calling [`BloomModel`]. Check [this discussion](https://huggingface.co/bigscience/bloom/discussions/120#633d28389addb8530b406c2a) on how the `vocab_size` has been defined. hidden_size (`int`, *optional*, defaults to 64): Dimensionality of the embeddings and hidden states. n_layer (`int`, *optional*, defaults to 2): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): The epsilon to use in the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. apply_residual_connection_post_layernorm (`bool`, *optional*, defaults to `False`): If enabled, use the layer norm of the hidden states as the residual in the transformer blocks hidden_dropout (`float`, *optional*, defaults to 0.1): Dropout rate of the dropout function on the bias dropout. attention_dropout (`float`, *optional*, defaults to 0.1): Dropout rate applied to the attention probs use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). pretraining_tp (`int`, *optional*, defaults to `1`): Experimental feature. Tensor parallelism rank used during pretraining with Megatron. Please refer to [this document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is necessary to ensure exact reproducibility of the pretraining results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232). 
Note also that this is enabled only when `slow_but_exact=True`. slow_but_exact (`bool`, *optional*, defaults to `False`): Experimental feature. Whether to use slow but exact implementation of the attention mechanism. While merging the TP rank tensors, due to slicing operations the results may be slightly different between the model trained on Megatron and our model. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232). A solution to obtain more accurate results is to enable this feature. Enabling this will hurt the computational time of the inference. Will be probably resolved in the future once the main model has been fine-tuned with TP_rank=1. Example: ```python >>> from transformers import BloomConfig, BloomModel >>> # Initializing a Bloom configuration >>> configuration = BloomConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = BloomModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "bloom" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_hidden_layers": "n_layer", "num_attention_heads": "n_head", } def __init__( self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, # TP rank used when training with megatron slow_but_exact=False, **kwargs, ): self.vocab_size = vocab_size # Backward compatibility with n_embed kwarg n_embed = kwargs.pop("n_embed", None) self.hidden_size = hidden_size if n_embed is None else n_embed self.n_layer = n_layer self.n_head = n_head self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache self.pretraining_tp = pretraining_tp self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.slow_but_exact = slow_but_exact super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) class BloomOnnxConfig(OnnxConfigWithPast): torch_onnx_minimum_version = version.parse("1.12") def __init__( self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ): super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past) if not getattr(self._config, "pad_token_id", None): # TODO: how to do that better? self._config.pad_token_id = 0 @property def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}}) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True) common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"} else: common_inputs["attention_mask"] = {0: "batch", 1: "sequence"} return common_inputs @property def num_layers(self) -> int: return self._config.n_layer @property def num_attention_heads(self) -> int: return self._config.n_head @property def atol_for_validation(self) -> float: return 1e-3 def generate_dummy_inputs( self, tokenizer: "PreTrainedTokenizer", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]: common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) # We need to order the input in the way they appears in the forward() ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, seqlen = common_inputs["input_ids"].shape # Not using the same length for past_key_values past_key_values_length = seqlen + 2 head_dim = self._config.hidden_size // self.num_attention_heads past_key_shape = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) past_value_shape = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) ordered_inputs["past_key_values"] = [ (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers) ] ordered_inputs["attention_mask"] = common_inputs["attention_mask"] if self.use_past: mask_dtype = ordered_inputs["attention_mask"].dtype ordered_inputs["attention_mask"] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 ) return ordered_inputs @property def default_onnx_opset(self) -> int: return 13 __all__ = ["BloomConfig", "BloomOnnxConfig"]
transformers/src/transformers/models/bloom/configuration_bloom.py/0
{ "file_path": "transformers/src/transformers/models/bloom/configuration_bloom.py", "repo_id": "transformers", "token_count": 4063 }
97
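A short sketch of the configuration behaviour described in the file above: the deliberately tiny defaults, the backward-compatible `n_embed` keyword, and the `attribute_map` aliases. The sizes used here are arbitrary.

```python
from transformers import BloomConfig

config = BloomConfig()  # defaults are tiny on purpose: hidden_size=64, n_layer=2, n_head=8

# `n_embed` is still accepted for backward compatibility and overrides `hidden_size`.
legacy = BloomConfig(n_embed=1024)
assert legacy.hidden_size == 1024

# attribute_map exposes the framework-wide names on top of Bloom's own field names.
assert config.num_hidden_layers == config.n_layer
assert config.num_attention_heads == config.n_head
```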
# coding=utf-8 # Copyright 2024 Meta Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Chameleon.""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( get_resize_output_image_size, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, is_valid_image, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): import PIL def make_batched_images(images) -> List[List[ImageInput]]: """ Accepts images in list or nested list format, and makes a list of images for preprocessing. Args: images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`): The input image. Returns: list: A list of images. """ if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]): return [img for img_list in images for img in img_list] elif isinstance(images, (list, tuple)) and is_valid_image(images[0]): return images elif is_valid_image(images): return [images] raise ValueError(f"Could not make batched video from {images}") class ChameleonImageProcessor(BaseImageProcessor): r""" Constructs a Chameleon image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 512}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to 1): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`Dict[str, int]` *optional*, defaults to {"height": 512, "width": 512}): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to 0.0078): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. 
Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `[1.0, 1.0, 1.0]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `[1.0, 1.0, 1.0]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.LANCZOS, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 0.0078, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 512} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 512, "width": 512} crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size") self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [1.0, 1.0, 1.0] self.image_std = image_std if image_std is not None else [1.0, 1.0, 1.0] self.do_convert_rgb = do_convert_rgb # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" default_to_square = True if "shortest_edge" in size: size = size["shortest_edge"] default_to_square = False elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size( image, size=size, default_to_square=default_to_square, input_data_format=input_data_format, ) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. 
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, param_name="size", default_to_square=False) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_batched_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) if do_convert_rgb: images = [self.blend_rgba(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
            input_data_format = infer_channel_dimension_format(images[0])

        all_images = []
        for image in images:
            if do_resize:
                image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

            if do_center_crop:
                image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

            if do_rescale:
                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

            if do_normalize:
                image = self.normalize(
                    image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
                )

            all_images.append(image)
        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            for image in all_images
        ]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def blend_rgba(self, image: ImageInput) -> ImageInput:
        """
        Convert image to RGB by blending the transparency layer if it's in RGBA format.
        If image is not `PIL.Image`, it is simply returned without modifications.

        Args:
            image (`ImageInput`):
                Image to convert.
        """
        if not isinstance(image, PIL.Image.Image):
            return image
        elif image.mode == "RGB":
            return image

        img_rgba = np.array(image.convert("RGBA"))

        # If there is no transparency layer, simply convert and return.
        if not (img_rgba[:, :, 3] < 255).any():
            return image.convert("RGB")

        # There is a transparency layer, blend it with a white background.
        # Calculate the alpha proportion for blending.
        alpha = img_rgba[:, :, 3] / 255.0
        img_rgb = (1 - alpha[:, :, np.newaxis]) * 255 + alpha[:, :, np.newaxis] * img_rgba[:, :, :3]
        return PIL.Image.fromarray(img_rgb.astype("uint8"), "RGB")


__all__ = ["ChameleonImageProcessor"]
transformers/src/transformers/models/chameleon/image_processing_chameleon.py/0
{ "file_path": "transformers/src/transformers/models/chameleon/image_processing_chameleon.py", "repo_id": "transformers", "token_count": 7399 }
98
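A minimal preprocessing sketch for the image processor above, assuming Pillow and PyTorch are installed. The RGBA input exercises `blend_rgba` (alpha-compositing onto a white background) before the usual resize / center-crop / rescale / normalize pipeline; the synthetic image and the printed shape are illustrative only.

```python
import numpy as np
from PIL import Image
from transformers import ChameleonImageProcessor

processor = ChameleonImageProcessor()

# Synthetic RGBA image with (partially) transparent pixels.
rgba = (np.random.rand(600, 800, 4) * 255).astype("uint8")
image = Image.fromarray(rgba, mode="RGBA")

print(processor.blend_rgba(image).mode)  # "RGB" -- alpha blended onto white

inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 512, 512]) with the default 512x512 crop
```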
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for ConvNeXT.""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) class ConvNextImageProcessor(BaseImageProcessor): r""" Constructs a ConvNeXT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overriden by `do_resize` in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`): Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"]/crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can be overriden by `size` in the `preprocess` method. crop_pct (`float` *optional*, defaults to 224 / 256): Percentage of the image to crop. Only has an effect if `do_resize` is `True` and size < 384. Can be overriden by `crop_pct` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overriden by `resample` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overriden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overriden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. 
This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 384} size = get_size_dict(size, default_to_square=False) self.do_resize = do_resize self.size = size # Default value set here for backwards compatibility where the value in config is None self.crop_pct = crop_pct if crop_pct is not None else 224 / 256 self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. crop_pct (`float`): Percentage of the image to crop. Only has an effect if size < 384. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" not in size: raise ValueError(f"Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}") shortest_edge = size["shortest_edge"] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct resize_shortest_edge = int(shortest_edge / crop_pct) resize_size = get_resize_output_image_size( image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format ) image = resize( image=image, size=resize_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) # then crop to (shortest_edge, shortest_edge) return center_crop( image=image, size=(shortest_edge, shortest_edge), data_format=data_format, input_data_format=input_data_format, **kwargs, ) else: # warping (no cropping) when evaluated at 384 or larger return resize( image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. crop_pct (`float`, *optional*, defaults to `self.crop_pct`): Percentage of the image to crop if size < 384. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. 
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize crop_pct = crop_pct if crop_pct is not None else self.crop_pct resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize( image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format ) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) __all__ = ["ConvNextImageProcessor"]
transformers/src/transformers/models/convnext/image_processing_convnext.py/0
{ "file_path": "transformers/src/transformers/models/convnext/image_processing_convnext.py", "repo_id": "transformers", "token_count": 6836 }
99
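A sketch of the two resizing regimes implemented above: below 384 the shortest edge is first scaled to `shortest_edge / crop_pct` and the result is center-cropped, while at 384 and above the image is warped directly to a square with no crop. The input array is random; only the output shapes matter here.

```python
import numpy as np
from transformers import ConvNextImageProcessor

image = (np.random.rand(500, 700, 3) * 255).astype(np.uint8)

# shortest_edge < 384: resize shortest edge to 224 / (224/256) = 256, then crop to 224x224.
small = ConvNextImageProcessor(size={"shortest_edge": 224}, crop_pct=224 / 256)
print(small(images=image, return_tensors="np")["pixel_values"].shape)  # (1, 3, 224, 224)

# shortest_edge >= 384: warp straight to (384, 384), no cropping.
large = ConvNextImageProcessor(size={"shortest_edge": 384})
print(large(images=image, return_tensors="np")["pixel_values"].shape)  # (1, 3, 384, 384)
```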
# coding=utf-8 # Copyright 2018 Salesforce and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Salesforce CTRL configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class CTRLConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`CTRLModel`] or a [`TFCTRLModel`]. It is used to instantiate a CTRL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [Salesforce/ctrl](https://huggingface.co/Salesforce/ctrl) architecture from SalesForce. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 246534): Vocabulary size of the CTRL model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`CTRLModel`] or [`TFCTRLModel`]. n_positions (`int`, *optional*, defaults to 256): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_embd (`int`, *optional*, defaults to 1280): Dimensionality of the embeddings and hidden states. dff (`int`, *optional*, defaults to 8192): Dimensionality of the inner dimension of the feed forward networks (FFN). n_layer (`int`, *optional*, defaults to 48): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. resid_pdrop (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (`int`, *optional*, defaults to 0.1): The dropout ratio for the embeddings. layer_norm_epsilon (`float`, *optional*, defaults to 1e-06): The epsilon to use in the layer normalization layers initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
Examples: ```python >>> from transformers import CTRLConfig, CTRLModel >>> # Initializing a CTRL configuration >>> configuration = CTRLConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = CTRLModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "ctrl" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs, ): self.vocab_size = vocab_size self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.dff = dff self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache super().__init__(**kwargs) __all__ = ["CTRLConfig"]
transformers/src/transformers/models/ctrl/configuration_ctrl.py/0
{ "file_path": "transformers/src/transformers/models/ctrl/configuration_ctrl.py", "repo_id": "transformers", "token_count": 1722 }
100
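The docstring above already shows basic instantiation; the sketch below only illustrates how the `attribute_map` aliases resolve onto CTRL's own field names and that the config round-trips through a plain dict, as with any `PretrainedConfig`. The sizes are arbitrary.

```python
from transformers import CTRLConfig

config = CTRLConfig(n_embd=512, n_layer=6, n_head=8)

# attribute_map maps the generic names onto CTRL's field names.
assert config.hidden_size == config.n_embd == 512
assert config.num_hidden_layers == config.n_layer == 6
assert config.max_position_embeddings == config.n_positions == 256

# Round-trip through a plain dict.
restored = CTRLConfig.from_dict(config.to_dict())
assert restored.n_embd == 512 and restored.n_layer == 6
```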
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DETA checkpoints from the original repository. URL: https://github.com/jozhang97/DETA/tree/master""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_deta_config(): config = DetaConfig( num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, ) # set labels config.num_labels = 91 repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text()) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "model.backbone.model.embedder.embedder.convolution.weight")) rename_keys.append(("backbone.0.body.bn1.weight", "model.backbone.model.embedder.embedder.normalization.weight")) rename_keys.append(("backbone.0.body.bn1.bias", "model.backbone.model.embedder.embedder.normalization.bias")) rename_keys.append(("backbone.0.body.bn1.running_mean", "model.backbone.model.embedder.embedder.normalization.running_mean")) rename_keys.append(("backbone.0.body.bn1.running_var", "model.backbone.model.embedder.embedder.normalization.running_var")) # stages for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): # shortcut if layer_idx == 0: rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var", ) ) # 3 convs for i in range(3): 
rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var", ) ) # transformer encoder for i in range(config.encoder_layers): rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias")) # transformer decoder for i in range(config.decoder_layers): rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias")) 
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias")) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def read_in_decoder_q_k_v(state_dict, config): # transformer decoder self-attention layers hidden_size = config.d_model for i in range(config.decoder_layers): # read in weights + bias of input projection layer of self-attention in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :] state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[ hidden_size : hidden_size * 2, : ] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2] state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :] state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:] # We will verify our results on an image 
of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): """ Copy/paste/tweak model's weights to our DETA structure. """ # load config config = get_deta_config() # load original state dict if model_name == "deta-resnet-50": filename = "adet_checkpoint0011.pth" elif model_name == "deta-resnet-50-24-epochs": filename = "adet_2x_checkpoint0023.pth" else: raise ValueError(f"Model name {model_name} not supported") checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename=filename) state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] # rename keys rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_decoder_q_k_v(state_dict, config) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: val = state_dict.pop(key) state_dict[key.replace("transformer.decoder", "model.decoder")] = val if "input_proj" in key: val = state_dict.pop(key) state_dict["model." + key] = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: val = state_dict.pop(key) state_dict[key.replace("transformer", "model")] = val # finally, create HuggingFace model and load state dict model = DetaForObjectDetection(config) model.load_state_dict(state_dict) model.eval() device = "cuda" if torch.cuda.is_available() else "cpu" model.to(device) # load image processor processor = DetaImageProcessor(format="coco_detection") # verify our conversion on image img = prepare_img() encoding = processor(images=img, return_tensors="pt") pixel_values = encoding["pixel_values"] outputs = model(pixel_values.to(device)) # verify logits if model_name == "deta-resnet-50": expected_logits = torch.tensor( [[-7.3978, -2.5406, -4.1668], [-8.2684, -3.9933, -3.8096], [-7.0515, -3.7973, -5.8516]] ) expected_boxes = torch.tensor([[0.5043, 0.4973, 0.9998], [0.2542, 0.5489, 0.4748], [0.5490, 0.2765, 0.0570]]) elif model_name == "deta-resnet-50-24-epochs": expected_logits = torch.tensor( [[-7.1688, -2.4857, -4.8669], [-7.8630, -3.8154, -4.2674], [-7.2730, -4.1865, -5.5323]] ) expected_boxes = torch.tensor([[0.5021, 0.4971, 0.9994], [0.2546, 0.5486, 0.4731], [0.1686, 0.1986, 0.2142]]) assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4) assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4) print("Everything ok!") if pytorch_dump_folder_path: # Save model and processor logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) # Push to hub if push_to_hub: print("Pushing model and processor to hub...") model.push_to_hub(f"jozhang97/{model_name}") processor.push_to_hub(f"jozhang97/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", type=str, default="deta-resnet-50", choices=["deta-resnet-50", "deta-resnet-50-24-epochs"], help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model.", ) parser.add_argument( "--push_to_hub", 
action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/deprecated/deta/convert_deta_resnet_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/deta/convert_deta_resnet_to_pytorch.py", "repo_id": "transformers", "token_count": 7793 }
101
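For context, here is a minimal, hedged sketch of loading a checkpoint produced by the DETA conversion script above and repeating the kind of verification it performs. The dump folder path is a placeholder for `--pytorch_dump_folder_path`; the model class, image processor, and COCO test image are the ones the script itself uses.

```python
# Hedged usage sketch for a checkpoint saved by convert_deta_checkpoint().
# "path/to/deta-resnet-50" is a placeholder for the --pytorch_dump_folder_path argument.
import requests
import torch
from PIL import Image

from transformers import DetaForObjectDetection, DetaImageProcessor

model = DetaForObjectDetection.from_pretrained("path/to/deta-resnet-50")
processor = DetaImageProcessor.from_pretrained("path/to/deta-resnet-50")
model.eval()

# Same COCO test image the conversion script downloads for verification
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(inputs["pixel_values"])

print(outputs.logits.shape)      # (1, num_queries, num_labels) -> (1, 900, 91) with this config
print(outputs.pred_boxes.shape)  # (1, num_queries, 4)
```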
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert GPTSANJapanese checkpoints from the original repository to pytorch model.""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def convert_tf_gptsan_to_pt(args): parameter_file = os.path.join(args.tf_model_dir, "parameters.json") params = json.loads(open(parameter_file).read()) if not params: raise ValueError( f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." ) if not args.output.endswith(".pt"): args.output = args.output + ".pt" new_state = OrderedDict() with tf.device("/CPU:0"): reader = tf.train.load_checkpoint(args.tf_model_dir) shapes = reader.get_variable_to_shape_map() for key_name in shapes.keys(): vnp = reader.get_tensor(key_name).astype(np.float16) if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"): continue if key_name.startswith("pasts/"): if key_name.startswith("pasts/mlp"): player = int(key_name[9]) elif key_name.startswith("pasts/out"): player = 8 name = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.startswith("model/moe"): player = int(key_name[9:].split("/")[0]) if key_name.endswith("/switch_gating/kernel"): name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.endswith("/softmlp/kernel"): name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"): nlayer = key_name[-9:-7] for i in range(16): name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer) state = ( vnp[i].transpose([1, 0]).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided new_state[name] = torch.tensor(state) elif key_name.startswith("model/mlp"): player = int(key_name[9:].split("/")[0]) if key_name.endswith("/p1/kernel"): name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.endswith("/p1/bias"): name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) elif key_name.endswith("/p2/kernel"): name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.endswith("/p2/bias"): name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player state = vnp.copy() # same 
because it is one dimensional new_state[name] = torch.tensor(state) elif key_name.startswith("model/ln"): player = int(key_name[8:].split("/")[0]) if key_name.endswith("/b"): name = "model.blocks.%d.feed_forward.norm.bias" % player state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) elif key_name.endswith("/g"): name = "model.blocks.%d.feed_forward.norm.weight" % player state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) elif key_name.startswith("model/att"): player = int(key_name[9:].split("/")[0]) if key_name.endswith("/qkv/kernel"): state = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum state_q = state[:, 0, :, :] state_k = state[:, 1, :, :] state_v = state[:, 2, :, :] state_q = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]]) .transpose([1, 0]) .copy() ) # Mesh-Tensorflow is a diagonal matrix state_k = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]]) .transpose([1, 0]) .copy() ) # Mesh-Tensorflow is a diagonal matrix state_v = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]]) .transpose([1, 0]) .copy() ) # Mesh-Tensorflow is a diagonal matrix name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player new_state[name] = torch.tensor(state_q) name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player new_state[name] = torch.tensor(state_k) name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player new_state[name] = torch.tensor(state_v) elif key_name.endswith("/o/kernel"): name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player state = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy() ) # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.startswith("model/an"): player = int(key_name[8:].split("/")[0]) if key_name.endswith("/b"): name = "model.blocks.%d.self_attn.norm.bias" % player state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) elif key_name.endswith("/g"): name = "model.blocks.%d.self_attn.norm.weight" % player state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) elif ( key_name.startswith("model/wte") or key_name.startswith("model/wpe") or key_name.startswith("model/ete") ): nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[ key_name[-3:] ] name = "model.%s.weight" % nlayer state = vnp.copy() # same in embedded new_state[name] = torch.tensor(state) if key_name.startswith("model/wte"): name = "lm_head.weight" state = vnp.copy() # same in embedded new_state[name] = torch.tensor(state) elif key_name.startswith("model/wob"): name = "final_logits_bias" state = vnp.copy() # same in embedded state = state.reshape((1, -1)) new_state[name] = torch.tensor(state) elif key_name == "model/dense/kernel": name = "model.last_project.weight" state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name == "model/dense_1/bias": name = "model.last_project.bias" state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) torch.save(new_state, args.output) if __name__ == "__main__": parser = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, 
help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") args = parser.parse_args() convert_tf_gptsan_to_pt(args)
transformers/src/transformers/models/deprecated/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 5113 }
102
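As a sanity check on the conversion above, a small, hedged sketch that reloads the saved state dict and prints a few of the renamed parameters. The output filename is a placeholder; the key names follow the renaming scheme implemented in `convert_tf_gptsan_to_pt`.

```python
# Hedged sketch: inspect the PyTorch state dict written by convert_tf_gptsan_to_pt().
# "gptsan_converted.pt" stands in for whatever --output was passed on the command line.
import torch

state_dict = torch.load("gptsan_converted.pt", map_location="cpu")

# A few parameter names produced by the converter's renaming logic
for name in [
    "model.embed_tokens.weight",                         # from model/wte
    "model.blocks.0.feed_forward.norm.weight",           # from model/ln.../g
    "model.blocks.0.self_attn.self_attn.q_proj.weight",  # split out of model/att.../qkv/kernel
    "lm_head.weight",                                     # copy of the token embedding, as in the converter
]:
    if name in state_dict:
        print(f"{name}: {tuple(state_dict[name].shape)}, dtype={state_dict[name].dtype}")
```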
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch M-CTC-T model.""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ....activations import ACT2FN from ....file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ....integrations.deepspeed import is_deepspeed_zero3_enabled from ....integrations.fsdp import is_fsdp_managed_module from ....modeling_attn_mask_utils import _prepare_4d_attention_mask from ....modeling_outputs import BaseModelOutput, CausalLMOutput from ....modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from ....utils import logging from .configuration_mctct import MCTCTConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 1 _CONFIG_FOR_DOC = "MCTCTConfig" # Base docstring _CHECKPOINT_FOR_DOC = "speechbrain/m-ctc-t-large" _EXPECTED_OUTPUT_SHAPE = [1, 195, 1536] # CTC docstring _CTC_EXPECTED_OUTPUT = '"Mr. Quilter is the apostle of the middle classes, and we\'re glad to welcome his gospel."' _CTC_EXPECTED_LOSS = 1885.65 class MCTCTConv1dSubsampler(nn.Module): """ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation via gated linear units (https://arxiv.org/abs/1911.08460) """ def __init__(self, config): super().__init__() self.config = config self.glu_dim = config.conv_glu_dim self.dropout = nn.Dropout(config.conv_dropout) self.num_layers = config.num_conv_layers self.in_channels = config.input_feat_per_channel * config.input_channels if self.num_layers > 1: if config.conv_channels is None: raise ValueError( "Need to specify `conv_channels` configuration in `MCTCTConfig` to use multiple convolution" " layers." ) self.mid_channels = config.conv_channels else: self.mid_channels = None self.out_channels = config.hidden_size * 2 # considering GLU halving self.kernel_size = config.conv_kernel self.stride = config.conv_stride # NOTE: MCTCT by construction only uses one convolution kernel. I've made this flexible to allow for # multiple layers of convolutions, but not sure if this model definition should just restrict it # to one layer. This becomes especially relevant when considering the padding like line 1 of forward(). self.conv_layers = nn.ModuleList( nn.Conv1d( self.in_channels if i == 0 else self.mid_channels[i], self.mid_channels[i] if i < self.num_layers - 1 else self.out_channels, kernel_size=k, stride=self.stride[i], padding="valid", ) for i, k in enumerate(self.kernel_size) ) def forward(self, input_features): # NOTE: in reference to the NOTE in __init__, right now it just calculates padding as if # there will be just one conv layer. 
padding = sum([size // 2 for size in self.kernel_size]) # (7, 7) -> (3, 3) input_features = torch.nn.functional.pad(input_features, (0, 0, padding, padding), "constant", 0) hidden_states = input_features.transpose(1, 2).contiguous() # -> Batch x Frame x Time for conv in self.conv_layers: hidden_states = conv(hidden_states) hidden_states = nn.functional.glu(hidden_states, dim=self.glu_dim) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.transpose(1, 2).contiguous() # -> Batch x Time x Frame return hidden_states class MCTCTEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file # self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.LayerNorm = MCTCTLayerNorm() self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward( self, input_features=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): input_shape = input_features.size() if input_features is not None else inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_features) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class MCTCTSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.attention_head_dim self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.key = 
nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def reshape_fortran(self, x, shape): if len(x.shape) > 0: x = x.permute(*reversed(range(len(x.shape)))) return x.reshape(*reversed(shape)).permute(*reversed(range(len(shape)))) def relative_position_embedding_rotate(self, scores): # NOTE: should re-evaluate whether this re-implementation was truly necessary # or the reason why my complete re-haul worked was due to some other part # of the code. Adding this and the reshape fortrain code seems very undesirable. scores = scores.permute(0, 2, 3, 1) # e.g. [10, 1839, 14, 4] batch, hidden_state, seq_len, heads = scores.shape # e.g. [10, 1853, 14, 4] scores = torch.cat((scores, torch.zeros((batch, seq_len, seq_len, heads), device=scores.device)), dim=1) # e.g. [10, 25942, 1, 4] scores = self.reshape_fortran(scores, [batch, (hidden_state + seq_len) * seq_len, 1, heads]) # e.g. [10, 25928, 1, 4] scores = scores[:, : (seq_len + hidden_state - 1) * seq_len] # e.g. [10, 1852, 14, 4] scores = self.reshape_fortran(scores, [batch, hidden_state + seq_len - 1, seq_len, heads]) halfpoint = hidden_state // 2 scores = scores[:, halfpoint : halfpoint + seq_len].transpose(1, 2) # e.g. [10, 14, 14, 4] return scores.permute(0, 3, 1, 2) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) mixed_query_layer = mixed_query_layer / math.sqrt(self.attention_head_size) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) # relative key position embeddings positional_embedding = self.distance_embedding.weight relative_position_scores = torch.einsum("lh, bche -> bcle", positional_embedding, query_layer.transpose(2, 3)) relative_position_scores = self.relative_position_embedding_rotate(relative_position_scores) attention_scores = attention_scores + relative_position_scores if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in MCTCTModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).flatten(start_dim=-2) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class MCTCTLayerNorm(nn.Module): def __init__(self): super().__init__() self.singleton_weight = nn.Parameter(torch.ones(1)) self.singleton_bias = nn.Parameter(torch.zeros(1)) def forward(self, hidden_states): return (hidden_states * self.singleton_weight) + self.singleton_bias class MCTCTSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.config = config self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MCTCTAttention(nn.Module): def __init__(self, config): super().__init__() self.self = MCTCTSelfAttention(config) self.output = MCTCTSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class MCTCTIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size, bias=False) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class MCTCTOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size, bias=False) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MCTCTLayer(nn.Module): def __init__(self, config: MCTCTConfig): super().__init__() self.seq_len_dim = 1 self.chunk_size_feed_forward = 
config.chunk_size_feed_forward self.intermediate = MCTCTIntermediate(config) self.attention = MCTCTAttention(config) self.is_decoder = config.is_decoder self.output = MCTCTOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class MCTCTPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MCTCTConfig base_model_prefix = "mctct" main_input_name = "input_features" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" std = self.config.initializer_range if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, MCTCTLayerNorm): module.singleton_weight.data.fill_(1.0) module.singleton_bias.data.zero_() if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers """ dilation = 1 for _, kernel_sz, stride in zip( range(self.config.num_conv_layers), self.config.conv_kernel, self.config.conv_stride ): padding = kernel_sz // 2 input_lengths = input_lengths + 2 * padding - dilation * (kernel_sz - 1) - 1 input_lengths = torch.div(input_lengths, stride, rounding_mode="trunc") + 1 return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask): # generate creates 3D attention mask, because of the shape of input_features # convert it to 2D if thats the case if len(attention_mask.shape) > 2: attention_mask = attention_mask[:, :, -1] # subsampled_lengths = attention_mask.sum(-1) subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)) bsz = attention_mask.size()[0] attention_mask = torch.zeros( (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values # before the output lengths indices are attended to attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long() return attention_mask MCTCT_START_DOCSTRING = r""" This model is a PyTorch 
[torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MCTCTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MCTCT_INPUTS_DOCSTRING = r""" Args: input_features (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`Wav2Vec2CTCTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
""" class MCTCTEncoder(MCTCTPreTrainedModel): def __init__(self, config: MCTCTConfig): super().__init__(config) self.hidden_dropout_prob = config.hidden_dropout_prob self.layer_norm = MCTCTLayerNorm() self.conv = MCTCTConv1dSubsampler(config) self.layers = nn.ModuleList([MCTCTLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, input_features: torch.Tensor, attention_mask: torch.Tensor, head_mask: torch.Tensor, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_features = self.layer_norm(input_features) inputs_embeds = self.conv(input_features) # subsample attention mask if necessary if attention_mask is not None: attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask) hidden_states = nn.functional.dropout(inputs_embeds, p=self.hidden_dropout_prob, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, " f"but it is for {head_mask.size()[0]}." 
) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False if not skip_the_layer or synced_gpus: # under fsdp or deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) @add_start_docstrings( "The bare M-CTC-T Model transformer outputting raw hidden-states without any specific head on top.", MCTCT_START_DOCSTRING, ) class MCTCTModel(MCTCTPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.encoder = MCTCTEncoder(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_features: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_features is None: raise ValueError("You have to specify input_features.") encoder_outputs = self.encoder( input_features, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """MCTCT Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""", MCTCT_START_DOCSTRING, ) class MCTCTForCTC(MCTCTPreTrainedModel): def __init__(self, config): super().__init__(config) self.mctct = MCTCTModel(config) if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the 
vocabulary size of the language model head. Please " "instantiate the model as follows: `MCTCTForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." ) output_hidden_size = config.hidden_size self.ctc_head = nn.Linear(output_hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS, ) def forward( self, input_features: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. """ if labels is not None and labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mctct( input_features, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.ctc_head(hidden_states) loss = None if labels is not None: # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones(input_features.shape[:-1], dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions )
transformers/src/transformers/models/deprecated/mctct/modeling_mctct.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/mctct/modeling_mctct.py", "repo_id": "transformers", "token_count": 13987 }
103
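Two details of `MCTCTForCTC.forward` above are easy to get wrong: the CTC `input_lengths` come from the convolutional subsampling formula in `_get_feat_extract_output_lengths`, and the `target_lengths` come from masking padded labels set to -100. A hedged sketch of both computations follows; the kernel and stride values are assumptions standing in for `config.conv_kernel` and `config.conv_stride`.

```python
# Hedged sketch of how input_lengths and target_lengths for the CTC loss are derived.
import torch

# 1) Encoder subsampling: mirrors MCTCTPreTrainedModel._get_feat_extract_output_lengths.
#    kernel=(7,) and stride=(3,) are assumed values; the model reads them from MCTCTConfig.
def feat_extract_output_length(input_length, conv_kernel=(7,), conv_stride=(3,)):
    dilation = 1
    for kernel_sz, stride in zip(conv_kernel, conv_stride):
        padding = kernel_sz // 2
        input_length = input_length + 2 * padding - dilation * (kernel_sz - 1) - 1
        input_length = input_length // stride + 1  # torch.div(..., rounding_mode="trunc") + 1
    return input_length

print(feat_extract_output_length(1000))  # -> 334 frames reach the encoder / CTC head

# 2) Targets: padded label positions are filled with -100 and excluded, as in forward().
labels = torch.tensor([[5, 12, 7, -100, -100],
                       [3, 9, -100, -100, -100]])
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)                    # tensor([3, 2])
flattened_targets = labels.masked_select(labels_mask)   # tensor([ 5, 12,  7,  3,  9])
print(target_lengths, flattened_targets)
```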
# coding=utf-8 # Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Open-Llama model configuration""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class OpenLlamaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`OpenLlamaModel`]. It is used to instantiate an Open-Llama model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [s-JoL/Open-Llama-V1](https://huggingface.co/s-JoL/Open-Llama-V1). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the Open-Llama model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`OpenLlamaModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 11008): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. tie_word_embeddings(`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. 
The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. See the following thread for more information on how these scaling strategies behave: https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an experimental feature, subject to breaking API changes in future versions. Example: ```python >>> from transformers import OpenLlamaModel, OpenLlamaConfig >>> # Initializing a Open-Llama open_llama-7b style configuration >>> configuration = OpenLlamaConfig() >>> # Initializing a model from the open_llama-7b style configuration >>> model = OpenLlamaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "open-llama" def __init__( self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_theta=10000.0, rope_scaling=None, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.use_memory_efficient_attention = kwargs.pop( "use_memorry_efficient_attention", use_memory_efficient_attention ) self.hidden_dropout_prob = hidden_dropout_prob self.attention_dropout_prob = attention_dropout_prob self.use_stable_embedding = use_stable_embedding self.shared_input_output_embedding = shared_input_output_embedding self.rope_theta = rope_theta self.rope_scaling = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) def _rope_scaling_validation(self): """ Validate the `rope_scaling` configuration. """ if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: raise ValueError( "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}" ) rope_scaling_type = self.rope_scaling.get("type", None) rope_scaling_factor = self.rope_scaling.get("factor", None) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
transformers/src/transformers/models/deprecated/open_llama/configuration_open_llama.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/open_llama/configuration_open_llama.py", "repo_id": "transformers", "token_count": 3009 }
104
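To make the `rope_scaling` contract described in the Open-Llama configuration above concrete, here is a short, hedged example of a value that passes `_rope_scaling_validation` and one that fails. It assumes the deprecated Open-Llama classes are still exported by your transformers version; the factor values are purely illustrative.

```python
# Hedged sketch of the rope_scaling format validated by OpenLlamaConfig.
from transformers import OpenLlamaConfig

# Valid: exactly two fields, type in {"linear", "dynamic"} and factor a float > 1.0
config = OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 2.0})
print(config.rope_scaling)

# Invalid: factor must be strictly greater than 1.0, so this raises ValueError
try:
    OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 1.0})
except ValueError as err:
    print(err)
```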
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl. In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py """ import warnings from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....modeling_utils import PreTrainedModel from ....utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_transfo_xl import TransfoXLConfig from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "transfo-xl/transfo-xl-wt103" _CONFIG_FOR_DOC = "TransfoXLConfig" def build_tf_to_pytorch_map(model, config): """ A map of modules from TF to PyTorch. This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible. """ tf_to_pt_map = {} if hasattr(model, "transformer"): # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax tf_to_pt_map.update( { "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight, "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias, } ) for i, (out_l, proj_l, tie_proj) in enumerate( zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs) ): layer_str = f"transformer/adaptive_softmax/cutoff_{i}/" if config.tie_word_embeddings: tf_to_pt_map.update({layer_str + "b": out_l.bias}) else: raise NotImplementedError # I don't think this is implemented in the TF code tf_to_pt_map.update({layer_str + "lookup_table": out_l.weight, layer_str + "b": out_l.bias}) if not tie_proj: tf_to_pt_map.update({layer_str + "proj": proj_l}) # Now load the rest of the transformer model = model.transformer # Embeddings for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)): layer_str = f"transformer/adaptive_embed/cutoff_{i}/" tf_to_pt_map.update({layer_str + "lookup_table": embed_l.weight, layer_str + "proj_W": proj_l}) # Transformer blocks for i, b in enumerate(model.layers): layer_str = f"transformer/layer_{i}/" tf_to_pt_map.update( { layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight, layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias, layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight, layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight, layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight, layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight, layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias, layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight, layer_str + "ff/layer_1/bias": 
b.pos_ff.CoreNet[0].bias, layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight, layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias, } ) # Relative positioning biases if config.untie_r: r_r_list = [] r_w_list = [] for b in model.layers: r_r_list.append(b.dec_attn.r_r_bias) r_w_list.append(b.dec_attn.r_w_bias) else: r_r_list = [model.r_r_bias] r_w_list = [model.r_w_bias] tf_to_pt_map.update({"transformer/r_r_bias": r_r_list, "transformer/r_w_bias": r_w_list}) return tf_to_pt_map def load_tf_weights_in_transfo_xl(model, config, tf_path): """Load tf checkpoints in a pytorch model""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise # Build TF to PyTorch weights loading map tf_to_pt_map = build_tf_to_pytorch_map(model, config) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) tf_weights = {} for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) tf_weights[name] = array for name, pointer in tf_to_pt_map.items(): assert name in tf_weights array = tf_weights[name] # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if "kernel" in name or "proj" in name: array = np.transpose(array) if ("r_r_bias" in name or "r_w_bias" in name) and len(pointer) > 1: # Here we will split the TF weights assert len(pointer) == array.shape[0] for i, p_i in enumerate(pointer): arr_i = array[i, ...] try: assert p_i.shape == arr_i.shape except AssertionError as e: e.args += (p_i.shape, arr_i.shape) raise logger.info(f"Initialize PyTorch weight {name} for layer {i}") p_i.data = torch.from_numpy(arr_i) else: try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + "/Adam", None) tf_weights.pop(name + "/Adam_1", None) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}") return model class PositionalEmbedding(nn.Module): def __init__(self, demb): super().__init__() self.demb = demb inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)) self.register_buffer("inv_freq", inv_freq) def forward(self, pos_seq, bsz=None): sinusoid_inp = torch.outer(pos_seq, self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) if bsz is not None: return pos_emb[:, None, :].expand(-1, bsz, -1) else: return pos_emb[:, None, :] class PositionwiseFF(nn.Module): def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5): super().__init__() self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.CoreNet = nn.Sequential( nn.Linear(d_model, d_inner), nn.ReLU(inplace=True), nn.Dropout(dropout), nn.Linear(d_inner, d_model), nn.Dropout(dropout), ) self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon) self.pre_lnorm = pre_lnorm def forward(self, inp): if self.pre_lnorm: # layer normalization + positionwise feed-forward core_out = self.CoreNet(self.layer_norm(inp)) # residual connection output = core_out + inp else: # positionwise feed-forward core_out = 
self.CoreNet(inp) # residual connection + layer normalization output = self.layer_norm(inp + core_out) return output class RelPartialLearnableMultiHeadAttn(nn.Module): def __init__( self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False, r_r_bias=None, r_w_bias=None, layer_norm_epsilon=1e-5, ): super().__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon) self.scale = 1 / (d_head**0.5) self.pre_lnorm = pre_lnorm if r_r_bias is None or r_w_bias is None: # Biases are not shared self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) else: self.r_r_bias = r_r_bias self.r_w_bias = r_w_bias self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False) def _rel_shift(self, x): zero_pad_shape = (x.size(0), 1) + x.size()[2:] zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=1) x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:] x_padded = x_padded.view(*x_padded_shape) x = x_padded[1:].view_as(x) return x def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False): qlen, rlen, bsz = w.size(0), r.size(0), w.size(1) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head # compute attention score rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head rr_head_q = w_head_q + self.r_r_bias BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head BD = self._rel_shift(BD) # [qlen x klen x bsz x n_head] attn_score = AC + BD attn_score.mul_(self.scale) mask_value = torch.finfo(attn_score.dtype).min # compute attention probability if attn_mask is not None and torch.sum(attn_mask).item(): attn_mask = attn_mask == 1 # Switch to bool if attn_mask.dim() == 2: attn_score = ( attn_score.float().masked_fill(attn_mask[None, :, :, None], mask_value).type_as(attn_score) ) elif attn_mask.dim() == 3: attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], mask_value).type_as(attn_score) # [qlen x klen x bsz x n_head] attn_prob = nn.functional.softmax(attn_score, dim=1) attn_prob = self.dropatt(attn_prob) # Mask heads if we want to if head_mask is not None: attn_prob = attn_prob * head_mask # compute attention vector attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v)) # [qlen x bsz x 
n_head x d_head] attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection outputs = [w + attn_out] else: # residual connection + layer normalization outputs = [self.layer_norm(w + attn_out)] if output_attentions: outputs.append(attn_prob) return outputs class RelPartialLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs): super().__init__() self.dec_attn = RelPartialLearnableMultiHeadAttn( n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs ) self.pos_ff = PositionwiseFF( d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm"), layer_norm_epsilon=layer_norm_epsilon ) def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False): attn_outputs = self.dec_attn( dec_inp, r, attn_mask=dec_attn_mask, mems=mems, head_mask=head_mask, output_attentions=output_attentions, ) ff_output = self.pos_ff(attn_outputs[0]) outputs = [ff_output] + attn_outputs[1:] return outputs class AdaptiveEmbedding(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False): super().__init__() self.n_token = n_token self.d_embed = d_embed self.cutoffs = cutoffs + [n_token] self.div_val = div_val self.d_proj = d_proj self.emb_scale = d_proj**0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = nn.ParameterList() if div_val == 1: self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)) if d_proj != d_embed: self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i = d_embed // (div_val**i) self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i)) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) def forward(self, inp): if self.div_val == 1: embed = self.emb_layers[0](inp) if self.d_proj != self.d_embed: embed = nn.functional.linear(embed, self.emb_projs[0]) else: param = next(self.parameters()) inp_flat = inp.view(-1) emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue inp_i = inp_flat.index_select(0, indices_i) - l_idx emb_i = self.emb_layers[i](inp_i) emb_i = nn.functional.linear(emb_i, self.emb_projs[i]) emb_flat.index_copy_(0, indices_i, emb_i) embed_shape = inp.size() + (self.d_proj,) embed = emb_flat.view(embed_shape) embed.mul_(self.emb_scale) return embed class TransfoXLPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = TransfoXLConfig load_tf_weights = load_tf_weights_in_transfo_xl base_model_prefix = "transformer" def _init_weight(self, weight): if self.config.init == "uniform": nn.init.uniform_(weight, -self.config.init_range, self.config.init_range) elif self.config.init == "normal": nn.init.normal_(weight, 0.0, self.config.init_std) def _init_bias(self, bias): nn.init.constant_(bias, 0.0) def _init_weights(self, m): """Initialize the weights.""" classname = m.__class__.__name__ if classname.find("Linear") != -1: if hasattr(m, "weight") and m.weight is not None: self._init_weight(m.weight) if hasattr(m, "bias") and m.bias is not None: self._init_bias(m.bias) elif classname.find("AdaptiveEmbedding") != -1: if hasattr(m, "emb_projs"): for i in range(len(m.emb_projs)): if m.emb_projs[i] is not None: nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std) elif classname.find("Embedding") != -1: if hasattr(m, "weight"): self._init_weight(m.weight) elif classname.find("ProjectedAdaptiveLogSoftmax") != -1: if hasattr(m, "cluster_weight") and m.cluster_weight is not None: self._init_weight(m.cluster_weight) if hasattr(m, "cluster_bias") and m.cluster_bias is not None: self._init_bias(m.cluster_bias) if hasattr(m, "out_projs"): for i in range(len(m.out_projs)): if m.out_projs[i] is not None: nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std) elif classname.find("LayerNorm") != -1: if hasattr(m, "weight"): nn.init.normal_(m.weight, 1.0, self.config.init_std) if hasattr(m, "bias") and m.bias is not None: self._init_bias(m.bias) else: if hasattr(m, "r_emb"): self._init_weight(m.r_emb) if hasattr(m, "r_w_bias"): self._init_weight(m.r_w_bias) if hasattr(m, "r_r_bias"): self._init_weight(m.r_r_bias) if hasattr(m, "r_bias"): self._init_bias(m.r_bias) def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: Optional[int] = -1): """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying weights embeddings afterwards if the model class has a *tie_weights()* method. Arguments: new_num_tokens: (*optional*) int: New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and just returns a pointer to the input tokens `torch.nn.Embeddings` Module of the model. layer: (*optional*) int: Layer of the *AdaptiveEmbedding* where the resizing should be done. Per default the last layer will be resized. Be aware that when resizing other than the last layer, you have to ensure that the new token(s) in the tokenizer are at the corresponding position. 
Return: `torch.nn.Embeddings` Pointer to the input tokens Embeddings Module of the model """ base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed if new_num_tokens is None: return self.get_input_embeddings() new_num_tokens_layer, layer = self._get_new_num_tokens_layer(new_num_tokens, layer) assert new_num_tokens_layer > 0, "The size of the new embedding layer cannot be 0 or less" model_embeds = base_model._resize_token_embeddings(new_num_tokens_layer, layer) # Update base model and current model config self.config.vocab_size = new_num_tokens base_model.vocab_size = new_num_tokens base_model.n_token = new_num_tokens new_embedding_shapes = self._get_embedding_shapes() self._resize_cutoffs(new_num_tokens, new_num_tokens_layer, new_embedding_shapes, layer) # Tie weights again if needed self.tie_weights() return model_embeds def _get_new_num_tokens_layer(self, new_num_tokens, layer): embeddings = self.get_input_embeddings() if layer == -1: layer = len(embeddings.emb_layers) - 1 assert 0 <= layer <= len(embeddings.emb_layers) - 1 new_num_tokens_layer = ( new_num_tokens - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]]) - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :]]) ) return new_num_tokens_layer, layer def _get_embedding_shapes(self): embeddings = self.get_input_embeddings() return [emb.weight.shape[0] for emb in embeddings.emb_layers] def _resize_token_embeddings(self, new_num_tokens, layer=-1): embeddings = self.get_input_embeddings() if new_num_tokens is None: return embeddings new_embeddings_layer = self._get_resized_embeddings(embeddings.emb_layers[layer], new_num_tokens) embeddings.emb_layers[layer] = new_embeddings_layer self.set_input_embeddings(embeddings) return self.get_input_embeddings() def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer): embeddings = self.get_input_embeddings() for i in range(layer, len(embeddings.cutoffs)): embeddings.cutoffs[i] = sum(new_embedding_shapes[: i + 1]) embeddings.cutoff_ends = [0] + embeddings.cutoffs embeddings.n_token = new_num_tokens self.config.cutoffs = embeddings.cutoffs[:-1] return embeddings.cutoffs @dataclass class TransfoXLModelOutput(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor mems: List[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class TransfoXLSequenceClassifierOutputWithPast(ModelOutput): """ Base class for outputs of sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: List[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class TransfoXLLMHeadModelOutput(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: losses (`torch.FloatTensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided): Language modeling losses (not reduced). prediction_scores (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. loss (`torch.FloatTensor` of shape `()`, *optional*, returned when `labels` is provided) Reduced language modeling loss. """ losses: Optional[torch.FloatTensor] = None prediction_scores: torch.FloatTensor = None mems: List[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None loss: Optional[torch.FloatTensor] = None @property def logits(self): # prediction scores are the output of the adaptive softmax, see # the file `modeling_transfo_xl_utilities`. Since the adaptive # softmax returns the log softmax value, `self.prediction_scores` # are strictly speaking not exactly `logits`, but behave the same # way logits do. return self.prediction_scores TRANSFO_XL_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TRANSFO_XL_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems given to this model should not be passed as `input_ids` as they have already been computed. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", TRANSFO_XL_START_DOCSTRING, ) class TransfoXLModel(TransfoXLPreTrainedModel): def __init__(self, config): super().__init__(config) self.n_token = config.vocab_size self.d_embed = config.d_embed self.d_model = config.d_model self.n_head = config.n_head self.d_head = config.d_head self.word_emb = AdaptiveEmbedding( config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val ) self.drop = nn.Dropout(config.dropout) self.n_layer = config.n_layer self.mem_len = config.mem_len self.attn_type = config.attn_type if not config.untie_r: self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.layers = nn.ModuleList() if config.attn_type == 0: # the default attention for i in range(config.n_layer): self.layers.append( RelPartialLearnableDecoderLayer( config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout, dropatt=config.dropatt, pre_lnorm=config.pre_lnorm, r_w_bias=None if config.untie_r else self.r_w_bias, r_r_bias=None if config.untie_r else self.r_r_bias, layer_norm_epsilon=config.layer_norm_epsilon, ) ) else: # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints raise NotImplementedError # Removed them to avoid maintaining dead code self.same_length = config.same_length self.clamp_len = config.clamp_len if self.attn_type == 0: # default attention self.pos_emb = PositionalEmbedding(self.d_model) else: # learnable embeddings and absolute embeddings raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.word_emb def set_input_embeddings(self, new_embeddings): self.word_emb = new_embeddings def backward_compatible(self): self.sample_softmax = -1 def reset_memory_length(self, mem_len): self.mem_len = mem_len def _prune_heads(self, heads): logger.info("Head pruning is not implemented for Transformer-XL model") pass def init_mems(self, bsz): if self.mem_len > 0: mems = [] param = next(self.parameters()) for i in range(self.n_layer): empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device) mems.append(empty) return mems else: return None def _update_mems(self, hids, mems, mlen, qlen): # does not deal with None if mems is None: return None # mems is not None assert len(hids) == len(mems), "len(hids) != len(mems)" # There are `mlen + qlen` steps that can be cached into mems with torch.no_grad(): new_mems = [] end_idx = mlen + max(0, qlen) beg_idx = max(0, end_idx - self.mem_len) for i in range(len(hids)): cat = torch.cat([mems[i], hids[i]], dim=0) new_mems.append(cat[beg_idx:end_idx].detach()) return new_mems @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, mems: Optional[List[torch.FloatTensor]] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, 
output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TransfoXLModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library # so we transpose here from shape [bsz, len] to shape [len, bsz] if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_ids = input_ids.transpose(0, 1).contiguous() qlen, bsz = input_ids.size() elif inputs_embeds is not None: inputs_embeds = inputs_embeds.transpose(0, 1).contiguous() qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if mems is None: mems = self.init_mems(bsz) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer) # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0) head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1) head_mask = head_mask.to( dtype=next(self.parameters()).dtype ) # switch to float if need + fp16 compatibility else: head_mask = [None] * self.n_layer if inputs_embeds is not None: word_emb = inputs_embeds else: word_emb = self.word_emb(input_ids) mlen = mems[0].size(0) if mems is not None else 0 klen = mlen + qlen if self.same_length: all_ones = word_emb.new_ones((qlen, klen), dtype=torch.bool) mask_len = klen - self.mem_len if mask_len > 0: mask_shift_len = qlen - mask_len else: mask_shift_len = qlen dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None] # -1 else: dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.bool), diagonal=1 + mlen)[ :, :, None ] hids = [] attentions = [] if output_attentions else None if self.attn_type == 0: # default pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=torch.int64).type_as( dtype=word_emb.dtype ) if self.clamp_len > 0: pos_seq.clamp_(max=self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb) pos_emb = self.drop(pos_emb) for i, layer in enumerate(self.layers): hids.append(core_out) mems_i = None if mems is None else mems[i] layer_outputs = layer( core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i, head_mask=head_mask[i], output_attentions=output_attentions, ) core_out = layer_outputs[0] if output_attentions: attentions.append(layer_outputs[1]) else: # learnable embeddings and absolute embeddings raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint core_out = self.drop(core_out) new_mems = self._update_mems(hids, mems, mlen, qlen) if output_hidden_states: # Add last layer and transpose to library standard shape [bsz, len, 
hidden_dim] hids.append(core_out) hids = tuple(t.transpose(0, 1).contiguous() for t in hids) else: hids = None if output_attentions: # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len] attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions) # We transpose back here to shape [bsz, len, hidden_dim] core_out = core_out.transpose(0, 1).contiguous() if not return_dict: return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None) return TransfoXLModelOutput( last_hidden_state=core_out, mems=new_mems, hidden_states=hids, attentions=attentions, ) @add_start_docstrings( """ The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive input embeddings) """, TRANSFO_XL_START_DOCSTRING, ) class TransfoXLLMHeadModel(TransfoXLPreTrainedModel): _tied_weights_keys = [r"crit\.out_projs\.\d+", r"crit\.out_layers\.\d+\.weight"] def __init__(self, config): super().__init__(config) self.transformer = TransfoXLModel(config) self.sample_softmax = config.sample_softmax self.trainer_compatible = getattr(config, "trainer_compatible", False) if not self.trainer_compatible: warnings.warn( "The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order " "to use that updated output, please specify `trainer_compatible=True` as your configuration" " attribute.", DeprecationWarning, ) assert self.sample_softmax <= 0, ( "Sampling from the softmax is not implemented yet. Please look at issue: #3310:" " https://github.com/huggingface/transformers/issues/3310" ) self.crit = ProjectedAdaptiveLogSoftmax( config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val ) # Initialize weights and apply final processing self.post_init() def tie_weights(self): """ Run this to be sure output and input (adaptive) softmax weights are tied """ if self.config.tie_word_embeddings: for i in range(len(self.crit.out_layers)): self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i]) if self.config.tie_projs: for i, tie_proj in enumerate(self.config.tie_projs): if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed: if self.config.torchscript: self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone()) else: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0] elif tie_proj and self.config.div_val != 1: if self.config.torchscript: self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone()) else: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i] def reset_memory_length(self, mem_len): self.transformer.reset_memory_length(mem_len) def init_mems(self, bsz): return self.transformer.init_mems(bsz) @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLLMHeadModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, mems: Optional[List[torch.FloatTensor]] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TransfoXLLMHeadModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. 
Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None: bsz, tgt_len = input_ids.size(0), input_ids.size(1) elif inputs_embeds is not None: bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1) else: raise ValueError("You have to specify either input_ids or inputs_embeds") transformer_outputs = self.transformer( input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden = transformer_outputs[0] pred_hid = last_hidden[:, -tgt_len:] if labels is not None: # Prevents all labels being -100 and throwing an error # when backwarding the loss miss_valid_label = labels[0, 1:].sum() == (labels.size(1) - 1) * -100 if miss_valid_label: # Sets an <EOS> token, just to prevent loss from being NaN labels[0, 1] = self.config.eos_token_id softmax_output = self.crit(pred_hid, labels) prediction_scores = softmax_output.view(bsz, tgt_len, -1) if labels is None else () if labels is not None: losses = softmax_output.view(bsz, tgt_len - 1) # Avoids from incorporating padding (-100) tokens into loss value loss = losses[losses != 0].mean() else: losses, loss = None, None if not return_dict: if self.trainer_compatible: output = (prediction_scores, losses) if losses is not None else (prediction_scores,) output += transformer_outputs[1:] return ((loss,) + output) if loss is not None else output else: output = (prediction_scores, *transformer_outputs[1:]) output = ((losses,) + output) if losses is not None else output return (output + (loss,)) if loss is not None else output return TransfoXLLMHeadModelOutput( loss=loss, prediction_scores=prediction_scores, losses=losses, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def get_output_embeddings(self): """Double-check if you are using adaptive softmax.""" if self.sample_softmax > 0: return self.out_layer else: return self.crit.out_layers[-1] def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs): inputs = {} # if past is defined in model kwargs then use it for faster decoding if past_key_values: inputs["mems"] = past_key_values inputs["input_ids"] = input_ids[:, -1].unsqueeze(-1) else: inputs["input_ids"] = input_ids return inputs def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer): new_cutoffs = super()._resize_cutoffs(new_num_tokens, new_emb_size, new_embedding_shapes, layer) self.crit.cutoffs = new_cutoffs self.crit.cutoff_ends = [0] + new_cutoffs self.crit.n_token = new_num_tokens @staticmethod def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]: """ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every generation step. """ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems] @add_start_docstrings( """ The Transformer-XL Model transformer with a sequence classification head on top (linear layer). 
[`TransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, TRANSFO_XL_START_DOCSTRING, ) class TransfoXLForSequenceClassification(TransfoXLPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = TransfoXLModel(config) self.score = nn.Linear(config.d_embed, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, mems: Optional[List[torch.FloatTensor]] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TransfoXLSequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] assert ( self.config.pad_token_id is not None or batch_size == 1 ), "Cannot handle batch sizes > 1 if no padding token is defined." if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[range(batch_size), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TransfoXLSequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
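

# A minimal usage sketch, assuming the `transfo-xl/transfo-xl-wt103` checkpoint named in
# `_CHECKPOINT_FOR_DOC` (and its tokenizer dependency, sacremoses) is available. It shows
# the mechanism this file is built around: the `mems` cache returned by
# `TransfoXLLMHeadModel` is fed back into the next forward pass so attention can reuse the
# previous segment's hidden states as extended context. The function name is illustrative.
def _transfo_xl_mems_usage_sketch():  # pragma: no cover - illustrative sketch only
    from transformers import TransfoXLTokenizer

    tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
    model = TransfoXLLMHeadModel.from_pretrained("transfo-xl/transfo-xl-wt103").eval()

    first = tokenizer("Hello, my dog is cute", return_tensors="pt")
    second = tokenizer("and my cat likes to sleep", return_tensors="pt")

    with torch.no_grad():
        # First segment: `mems` is None, so the model initializes the memory internally.
        outputs = model(input_ids=first["input_ids"])
        # Second segment: reuse the returned `mems` as cached hidden states from segment one.
        outputs = model(input_ids=second["input_ids"], mems=outputs.mems)
    # `prediction_scores` are the adaptive-softmax log-probabilities (see the `logits` property above).
    return outputs.prediction_scores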
transformers/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py", "repo_id": "transformers", "token_count": 25694 }
105
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut""" import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def get_configs(model): original_config = model.config encoder_config = DonutSwinConfig( image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, ) decoder_config = MBartConfig( is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len( model.decoder.tokenizer ), # several special tokens are added to the vocab of XLMRobertaTokenizer, see repo on the hub (added_tokens.json) scale_embedding=True, add_final_layer_norm=True, ) return encoder_config, decoder_config def rename_key(name): if "encoder.model" in name: name = name.replace("encoder.model", "encoder") if "decoder.model" in name: name = name.replace("decoder.model", "decoder") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "embeddings.norm") if name.startswith("encoder"): if "layers" in name: name = "encoder." 
+ name if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name and "mask" not in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if name == "encoder.norm.weight": name = "encoder.layernorm.weight" if name == "encoder.norm.bias": name = "encoder.layernorm.bias" return name def convert_state_dict(orig_state_dict, model): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "qkv" in key: key_split = key.split(".") layer_num = int(key_split[3]) block_num = int(key_split[5]) dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: orig_state_dict[ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight" ] = val[:dim, :] orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = ( val[dim : dim * 2, :] ) orig_state_dict[ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight" ] = val[-dim:, :] else: orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = ( val[:dim] ) orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = ( val[dim : dim * 2] ) orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = ( val[-dim:] ) elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: orig_state_dict[rename_key(key)] = val return orig_state_dict def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): # load original model original_model = DonutModel.from_pretrained(model_name).eval() # load HuggingFace model encoder_config, decoder_config = get_configs(original_model) encoder = DonutSwinModel(encoder_config) decoder = MBartForCausalLM(decoder_config) model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder) model.eval() state_dict = original_model.state_dict() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) # verify results on scanned document dataset = load_dataset("hf-internal-testing/example-documents") # no-script image = dataset["test"][0]["image"].convert("RGB") tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True) image_processor = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1] ) processor = DonutProcessor(image_processor, tokenizer) pixel_values = processor(image, return_tensors="pt").pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" question = "When is the coffee break?" 
task_prompt = task_prompt.replace("{user_input}", question) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": task_prompt = "<s_rvlcdip>" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: task_prompt = "<s_cord>" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": task_prompt = "s_cord-v2>" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": task_prompt = "<s_zhtrainticket>" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt task_prompt = "hello world" else: raise ValueError("Model name not supported") prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[ "input_ids" ] original_patch_embed = original_model.encoder.model.patch_embed(pixel_values) patch_embeddings, _ = model.encoder.embeddings(pixel_values) assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3) # verify encoder hidden states original_last_hidden_state = original_model.encoder(pixel_values) last_hidden_state = model.encoder(pixel_values).last_hidden_state assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2) # verify decoder hidden states original_logits = original_model(pixel_values, prompt_tensors, None).logits logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits assert torch.allclose(original_logits, logits, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and processor to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model") processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="naver-clova-ix/donut-base-finetuned-docvqa", required=False, type=str, help="Name of the original model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, required=False, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the 🤗 hub.", ) args = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
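

# A minimal invocation sketch, assuming `donut-python` (the original library) and `datasets`
# are installed; the output folder below is only a placeholder. `--push_to_hub` additionally
# requires Hub credentials:
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-base-finetuned-docvqa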
transformers/src/transformers/models/donut/convert_donut_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/donut/convert_donut_to_pytorch.py", "repo_id": "transformers", "token_count": 4051 }
106
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DPT checkpoints from the original repository. URL: https://github.com/isl-org/DPT""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dpt_config(checkpoint_url): config = DPTConfig(embedding_type="hybrid") if "large" in checkpoint_url: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 config.backbone_out_indices = [5, 11, 17, 23] config.neck_hidden_sizes = [256, 512, 1024, 1024] expected_shape = (1, 384, 384) if "nyu" in checkpoint_url or "midas" in checkpoint_url: config.hidden_size = 768 config.reassemble_factors = [1, 1, 1, 0.5] config.neck_hidden_sizes = [256, 512, 768, 768] config.num_labels = 150 config.patch_size = 16 expected_shape = (1, 384, 384) config.use_batch_norm_in_fusion_residual = False config.readout_type = "project" if "ade" in checkpoint_url: config.use_batch_norm_in_fusion_residual = True config.hidden_size = 768 config.reassemble_stage = [1, 1, 1, 0.5] config.num_labels = 150 config.patch_size = 16 repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text()) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} expected_shape = [1, 150, 480, 480] return config, expected_shape def remove_ignore_keys_(state_dict): ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"] for k in ignore_keys: state_dict.pop(k, None) def rename_key(name): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): name = name.replace("pretrained.model", "dpt.encoder") if "pretrained.model" in name: name = name.replace("pretrained.model", "dpt.embeddings") if "patch_embed" in name: name = name.replace("patch_embed", "") if "pos_embed" in name: name = name.replace("pos_embed", "position_embeddings") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "proj" in name and "project" not in name: name = name.replace("proj", "projection") if "blocks" in name: name = name.replace("blocks", "layer") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "norm1" in name and "backbone" not in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name and "backbone" not in name: name = name.replace("norm2", "layernorm_after") if "scratch.output_conv" in name: name = name.replace("scratch.output_conv", "head") if "scratch" 
in name: name = name.replace("scratch", "neck") if "layer1_rn" in name: name = name.replace("layer1_rn", "convs.0") if "layer2_rn" in name: name = name.replace("layer2_rn", "convs.1") if "layer3_rn" in name: name = name.replace("layer3_rn", "convs.2") if "layer4_rn" in name: name = name.replace("layer4_rn", "convs.3") if "refinenet" in name: layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1]) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}") if "out_conv" in name: name = name.replace("out_conv", "projection") if "resConfUnit1" in name: name = name.replace("resConfUnit1", "residual_layer1") if "resConfUnit2" in name: name = name.replace("resConfUnit2", "residual_layer2") if "conv1" in name: name = name.replace("conv1", "convolution1") if "conv2" in name: name = name.replace("conv2", "convolution2") # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0") if "pretrained.act_postprocess2.0.project.0" in name: name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0") if "pretrained.act_postprocess3.0.project.0" in name: name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0") if "pretrained.act_postprocess4.0.project.0" in name: name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0") # resize blocks if "pretrained.act_postprocess1.3" in name: name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection") if "pretrained.act_postprocess1.4" in name: name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize") if "pretrained.act_postprocess2.3" in name: name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection") if "pretrained.act_postprocess2.4" in name: name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize") if "pretrained.act_postprocess3.3" in name: name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection") if "pretrained.act_postprocess4.3" in name: name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection") if "pretrained.act_postprocess4.4" in name: name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize") if "pretrained" in name: name = name.replace("pretrained", "dpt") if "bn" in name: name = name.replace("bn", "batch_norm") if "head" in name: name = name.replace("head", "head.head") if "encoder.norm" in name: name = name.replace("encoder.norm", "layernorm") if "auxlayer" in name: name = name.replace("auxlayer", "auxiliary_head.head") if "backbone" in name: name = name.replace("backbone", "backbone.bit.encoder") if ".." 
in name: name = name.replace("..", ".") if "stem.conv" in name: name = name.replace("stem.conv", "bit.embedder.convolution") if "blocks" in name: name = name.replace("blocks", "layers") if "convolution" in name and "backbone" in name: name = name.replace("convolution", "conv") if "layer" in name and "backbone" in name: name = name.replace("layer", "layers") if "backbone.bit.encoder.bit" in name: name = name.replace("backbone.bit.encoder.bit", "backbone.bit") if "embedder.conv" in name: name = name.replace("embedder.conv", "embedder.convolution") if "backbone.bit.encoder.stem.norm" in name: name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm") return name # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :] state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -config.hidden_size :, : ] state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction): """ Copy/paste/tweak model's weights to our DPT structure. 
""" # define DPT configuration based on URL config, expected_shape = get_dpt_config(checkpoint_url) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") state_dict = torch.load(checkpoint_url, map_location="cpu") # remove certain keys remove_ignore_keys_(state_dict) # rename keys for key in state_dict.copy().keys(): val = state_dict.pop(key) state_dict[rename_key(key)] = val # read in qkv matrices read_in_q_k_v(state_dict, config) # load HuggingFace model model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config) model.load_state_dict(state_dict) model.eval() # Check outputs on an image size = 480 if "ade" in checkpoint_url else 384 image_processor = DPTImageProcessor(size=size) image = prepare_img() encoding = image_processor(image, return_tensors="pt") # forward pass outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth if show_prediction: prediction = ( torch.nn.functional.interpolate( outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False, ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255).show() if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub("ybelkada/dpt-hybrid-midas") image_processor.push_to_hub("ybelkada/dpt-hybrid-midas") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt", type=str, help="URL of the original DPT checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=False, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", ) parser.add_argument( "--model_name", default="dpt-large", type=str, help="Name of the model, in case you're pushing to the hub.", ) parser.add_argument( "--show_prediction", action="store_true", ) args = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
transformers/src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py", "repo_id": "transformers", "token_count": 5463 }
107
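A minimal sketch (not part of the DPT conversion script above) of the idea behind its read_in_q_k_v helper: a fused timm-style qkv projection is split into separate query/key/value weights by slicing along the output dimension. The hidden size and tensors below are toy assumptions, not values from a real checkpoint.

import torch

hidden_size = 8  # assumed toy hidden size
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # stand-in for a fused qkv weight
in_proj_bias = torch.randn(3 * hidden_size)                 # stand-in for a fused qkv bias

# Rows are laid out as [query; key; value], so slicing along dim 0 separates them,
# mirroring what the conversion helper does for each encoder layer.
query_weight = in_proj_weight[:hidden_size, :]
key_weight = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_weight = in_proj_weight[-hidden_size:, :]

query_bias = in_proj_bias[:hidden_size]
key_bias = in_proj_bias[hidden_size : 2 * hidden_size]
value_bias = in_proj_bias[-hidden_size:]

# Re-concatenating recovers the fused tensors, confirming the slices are disjoint and complete.
assert torch.equal(torch.cat([query_weight, key_weight, value_weight]), in_proj_weight)
assert torch.equal(torch.cat([query_bias, key_bias, value_bias]), in_proj_bias)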
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Tuple, overload import torch import torch.types from torch import nn from . import residue_constants as rc from .rigid_utils import Rigid, Rotation from .tensor_utils import batched_gather @overload def pseudo_beta_fn(aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: None) -> torch.Tensor: ... @overload def pseudo_beta_fn( aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: ... def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks): is_gly = aatype == rc.restype_order["G"] ca_idx = rc.atom_order["CA"] cb_idx = rc.atom_order["CB"] pseudo_beta = torch.where( is_gly[..., None].expand(*((-1,) * len(is_gly.shape)), 3), all_atom_positions[..., ca_idx, :], all_atom_positions[..., cb_idx, :], ) if all_atom_masks is not None: pseudo_beta_mask = torch.where( is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx], ) return pseudo_beta, pseudo_beta_mask else: return pseudo_beta def atom14_to_atom37(atom14: torch.Tensor, batch: Dict[str, torch.Tensor]) -> torch.Tensor: atom37_data = batched_gather( atom14, batch["residx_atom37_to_atom14"], dim=-2, no_batch_dims=len(atom14.shape[:-2]), ) atom37_data = atom37_data * batch["atom37_atom_exists"][..., None] return atom37_data def build_template_angle_feat(template_feats: Dict[str, torch.Tensor]) -> torch.Tensor: template_aatype = template_feats["template_aatype"] torsion_angles_sin_cos = template_feats["template_torsion_angles_sin_cos"] alt_torsion_angles_sin_cos = template_feats["template_alt_torsion_angles_sin_cos"] torsion_angles_mask = template_feats["template_torsion_angles_mask"] template_angle_feat = torch.cat( [ nn.functional.one_hot(template_aatype, 22), torsion_angles_sin_cos.reshape(*torsion_angles_sin_cos.shape[:-2], 14), alt_torsion_angles_sin_cos.reshape(*alt_torsion_angles_sin_cos.shape[:-2], 14), torsion_angles_mask, ], dim=-1, ) return template_angle_feat def build_template_pair_feat( batch: Dict[str, torch.Tensor], min_bin: torch.types.Number, max_bin: torch.types.Number, no_bins: int, use_unit_vector: bool = False, eps: float = 1e-20, inf: float = 1e8, ) -> torch.Tensor: template_mask = batch["template_pseudo_beta_mask"] template_mask_2d = template_mask[..., None] * template_mask[..., None, :] # Compute distogram (this seems to differ slightly from Alg. 
5) tpb = batch["template_pseudo_beta"] dgram = torch.sum((tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True) lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2 upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1) dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype) to_concat = [dgram, template_mask_2d[..., None]] aatype_one_hot: torch.LongTensor = nn.functional.one_hot( batch["template_aatype"], rc.restype_num + 2, ) n_res = batch["template_aatype"].shape[-1] to_concat.append(aatype_one_hot[..., None, :, :].expand(*aatype_one_hot.shape[:-2], n_res, -1, -1)) to_concat.append(aatype_one_hot[..., None, :].expand(*aatype_one_hot.shape[:-2], -1, n_res, -1)) n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]] rigids = Rigid.make_transform_from_reference( n_xyz=batch["template_all_atom_positions"][..., n, :], ca_xyz=batch["template_all_atom_positions"][..., ca, :], c_xyz=batch["template_all_atom_positions"][..., c, :], eps=eps, ) points = rigids.get_trans()[..., None, :, :] rigid_vec = rigids[..., None].invert_apply(points) inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec**2, dim=-1)) t_aa_masks = batch["template_all_atom_mask"] template_mask = t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c] template_mask_2d = template_mask[..., None] * template_mask[..., None, :] inv_distance_scalar = inv_distance_scalar * template_mask_2d unit_vector = rigid_vec * inv_distance_scalar[..., None] if not use_unit_vector: unit_vector = unit_vector * 0.0 to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1)) to_concat.append(template_mask_2d[..., None]) act = torch.cat(to_concat, dim=-1) act = act * template_mask_2d[..., None] return act def build_extra_msa_feat(batch: Dict[str, torch.Tensor]) -> torch.Tensor: msa_1hot: torch.LongTensor = nn.functional.one_hot(batch["extra_msa"], 23) msa_feat = [ msa_1hot, batch["extra_has_deletion"].unsqueeze(-1), batch["extra_deletion_value"].unsqueeze(-1), ] return torch.cat(msa_feat, dim=-1) def torsion_angles_to_frames( r: Rigid, alpha: torch.Tensor, aatype: torch.Tensor, rrgdf: torch.Tensor, ) -> Rigid: # [*, N, 8, 4, 4] default_4x4 = rrgdf[aatype, ...] # [*, N, 8] transformations, i.e. # One [*, N, 8, 3, 3] rotation matrix and # One [*, N, 8, 3] translation matrix default_r = r.from_tensor_4x4(default_4x4) bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2)) bb_rot[..., 1] = 1 # [*, N, 8, 2] alpha = torch.cat([bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2) # [*, N, 8, 3, 3] # Produces rotation matrices of the form: # [ # [1, 0 , 0 ], # [0, a_2,-a_1], # [0, a_1, a_2] # ] # This follows the original code rather than the supplement, which uses # different indices. 
all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape) all_rots[..., 0, 0] = 1 all_rots[..., 1, 1] = alpha[..., 1] all_rots[..., 1, 2] = -alpha[..., 0] all_rots[..., 2, 1:] = alpha all_frames = default_r.compose(Rigid(Rotation(rot_mats=all_rots), None)) chi2_frame_to_frame = all_frames[..., 5] chi3_frame_to_frame = all_frames[..., 6] chi4_frame_to_frame = all_frames[..., 7] chi1_frame_to_bb = all_frames[..., 4] chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame) chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame) chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame) all_frames_to_bb = Rigid.cat( [ all_frames[..., :5], chi2_frame_to_bb.unsqueeze(-1), chi3_frame_to_bb.unsqueeze(-1), chi4_frame_to_bb.unsqueeze(-1), ], dim=-1, ) all_frames_to_global = r[..., None].compose(all_frames_to_bb) return all_frames_to_global def frames_and_literature_positions_to_atom14_pos( r: Rigid, aatype: torch.Tensor, default_frames: torch.Tensor, group_idx: torch.Tensor, atom_mask: torch.Tensor, lit_positions: torch.Tensor, ) -> torch.Tensor: # [*, N, 14] group_mask = group_idx[aatype, ...] # [*, N, 14, 8] group_mask_one_hot: torch.LongTensor = nn.functional.one_hot( group_mask, num_classes=default_frames.shape[-3], ) # [*, N, 14, 8] t_atoms_to_global = r[..., None, :] * group_mask_one_hot # [*, N, 14] t_atoms_to_global = t_atoms_to_global.map_tensor_fn(lambda x: torch.sum(x, dim=-1)) # [*, N, 14, 1] atom_mask = atom_mask[aatype, ...].unsqueeze(-1) # [*, N, 14, 3] lit_positions = lit_positions[aatype, ...] pred_positions = t_atoms_to_global.apply(lit_positions) pred_positions = pred_positions * atom_mask return pred_positions
transformers/src/transformers/models/esm/openfold_utils/feats.py/0
{ "file_path": "transformers/src/transformers/models/esm/openfold_utils/feats.py", "repo_id": "transformers", "token_count": 3755 }
108
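A minimal standalone sketch of the distogram binning used in build_template_pair_feat above: squared pairwise distances between pseudo-beta atoms are one-hot encoded into distance bins whose edges come from torch.linspace. The bin settings and random coordinates below are toy assumptions.

import torch

min_bin, max_bin, no_bins, inf = 3.25, 50.75, 5, 1e8  # assumed toy bin settings
tpb = torch.randn(4, 3)  # [num_res, 3] random stand-in for template_pseudo_beta coordinates

# squared pairwise distances, shape [num_res, num_res, 1]
dgram = torch.sum((tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True)

lower = torch.linspace(min_bin, max_bin, no_bins) ** 2           # squared lower bin edges
upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)  # squared upper bin edges
one_hot_bins = ((dgram > lower) * (dgram < upper)).float()       # [num_res, num_res, no_bins]

# Each residue pair falls into at most one bin (pairs closer than min_bin land in none).
assert (one_hot_bins.sum(dim=-1) <= 1).all()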
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert FastSpeech2Conformer checkpoint.""" import argparse import json import re from pathlib import Path from tempfile import TemporaryDirectory import torch import yaml from transformers import ( FastSpeech2ConformerConfig, FastSpeech2ConformerModel, FastSpeech2ConformerTokenizer, logging, ) logging.set_verbosity_info() logger = logging.get_logger("transformers.models.FastSpeech2Conformer") CONFIG_MAPPING = { "adim": "hidden_size", "aheads": "num_attention_heads", "conformer_dec_kernel_size": "decoder_kernel_size", "conformer_enc_kernel_size": "encoder_kernel_size", "decoder_normalize_before": "decoder_normalize_before", "dlayers": "decoder_layers", "dunits": "decoder_linear_units", "duration_predictor_chans": "duration_predictor_channels", "duration_predictor_kernel_size": "duration_predictor_kernel_size", "duration_predictor_layers": "duration_predictor_layers", "elayers": "encoder_layers", "encoder_normalize_before": "encoder_normalize_before", "energy_embed_dropout": "energy_embed_dropout", "energy_embed_kernel_size": "energy_embed_kernel_size", "energy_predictor_chans": "energy_predictor_channels", "energy_predictor_dropout": "energy_predictor_dropout", "energy_predictor_kernel_size": "energy_predictor_kernel_size", "energy_predictor_layers": "energy_predictor_layers", "eunits": "encoder_linear_units", "pitch_embed_dropout": "pitch_embed_dropout", "pitch_embed_kernel_size": "pitch_embed_kernel_size", "pitch_predictor_chans": "pitch_predictor_channels", "pitch_predictor_dropout": "pitch_predictor_dropout", "pitch_predictor_kernel_size": "pitch_predictor_kernel_size", "pitch_predictor_layers": "pitch_predictor_layers", "positionwise_conv_kernel_size": "positionwise_conv_kernel_size", "postnet_chans": "speech_decoder_postnet_units", "postnet_filts": "speech_decoder_postnet_kernel", "postnet_layers": "speech_decoder_postnet_layers", "reduction_factor": "reduction_factor", "stop_gradient_from_energy_predictor": "stop_gradient_from_energy_predictor", "stop_gradient_from_pitch_predictor": "stop_gradient_from_pitch_predictor", "transformer_dec_attn_dropout_rate": "decoder_attention_dropout_rate", "transformer_dec_dropout_rate": "decoder_dropout_rate", "transformer_dec_positional_dropout_rate": "decoder_positional_dropout_rate", "transformer_enc_attn_dropout_rate": "encoder_attention_dropout_rate", "transformer_enc_dropout_rate": "encoder_dropout_rate", "transformer_enc_positional_dropout_rate": "encoder_positional_dropout_rate", "use_cnn_in_conformer": "use_cnn_in_conformer", "use_macaron_style_in_conformer": "use_macaron_style_in_conformer", "use_masking": "use_masking", "use_weighted_masking": "use_weighted_masking", "idim": "input_dim", "odim": "num_mel_bins", "spk_embed_dim": "speaker_embed_dim", "langs": "num_languages", "spks": "num_speakers", } def remap_model_yaml_config(yaml_config_path): with Path(yaml_config_path).open("r", encoding="utf-8") as f: args = 
yaml.safe_load(f) args = argparse.Namespace(**args) remapped_config = {} model_params = args.tts_conf["text2mel_params"] # espnet_config_key -> hf_config_key, any keys not included are ignored for espnet_config_key, hf_config_key in CONFIG_MAPPING.items(): if espnet_config_key in model_params: remapped_config[hf_config_key] = model_params[espnet_config_key] return remapped_config, args.g2p, args.token_list def convert_espnet_state_dict_to_hf(state_dict): new_state_dict = {} for key in state_dict: if "tts.generator.text2mel." in key: new_key = key.replace("tts.generator.text2mel.", "") if "postnet" in key: new_key = new_key.replace("postnet.postnet", "speech_decoder_postnet.layers") new_key = new_key.replace(".0.weight", ".conv.weight") new_key = new_key.replace(".1.weight", ".batch_norm.weight") new_key = new_key.replace(".1.bias", ".batch_norm.bias") new_key = new_key.replace(".1.running_mean", ".batch_norm.running_mean") new_key = new_key.replace(".1.running_var", ".batch_norm.running_var") new_key = new_key.replace(".1.num_batches_tracked", ".batch_norm.num_batches_tracked") if "feat_out" in key: if "weight" in key: new_key = "speech_decoder_postnet.feat_out.weight" if "bias" in key: new_key = "speech_decoder_postnet.feat_out.bias" if "encoder.embed.0.weight" in key: new_key = new_key.replace("0.", "") if "w_1" in key: new_key = new_key.replace("w_1", "conv1") if "w_2" in key: new_key = new_key.replace("w_2", "conv2") if "predictor.conv" in key: new_key = new_key.replace(".conv", ".conv_layers") pattern = r"(\d)\.(\d)" replacement = ( r"\1.conv" if ("2.weight" not in new_key) and ("2.bias" not in new_key) else r"\1.layer_norm" ) new_key = re.sub(pattern, replacement, new_key) if "pitch_embed" in key or "energy_embed" in key: new_key = new_key.replace("0", "conv") if "encoders" in key: new_key = new_key.replace("encoders", "conformer_layers") new_key = new_key.replace("norm_final", "final_layer_norm") new_key = new_key.replace("norm_mha", "self_attn_layer_norm") new_key = new_key.replace("norm_ff_macaron", "ff_macaron_layer_norm") new_key = new_key.replace("norm_ff", "ff_layer_norm") new_key = new_key.replace("norm_conv", "conv_layer_norm") if "lid_emb" in key: new_key = new_key.replace("lid_emb", "language_id_embedding") if "sid_emb" in key: new_key = new_key.replace("sid_emb", "speaker_id_embedding") new_state_dict[new_key] = state_dict[key] return new_state_dict @torch.no_grad() def convert_FastSpeech2ConformerModel_checkpoint( checkpoint_path, yaml_config_path, pytorch_dump_folder_path, repo_id=None, ): model_params, tokenizer_name, vocab = remap_model_yaml_config(yaml_config_path) config = FastSpeech2ConformerConfig(**model_params) # Prepare the model model = FastSpeech2ConformerModel(config) espnet_checkpoint = torch.load(checkpoint_path) hf_compatible_state_dict = convert_espnet_state_dict_to_hf(espnet_checkpoint) model.load_state_dict(hf_compatible_state_dict) model.save_pretrained(pytorch_dump_folder_path) # Prepare the tokenizer with TemporaryDirectory() as tempdir: vocab = {token: id for id, token in enumerate(vocab)} vocab_file = Path(tempdir) / "vocab.json" with open(vocab_file, "w") as f: json.dump(vocab, f) should_strip_spaces = "no_space" in tokenizer_name tokenizer = FastSpeech2ConformerTokenizer(str(vocab_file), should_strip_spaces=should_strip_spaces) tokenizer.save_pretrained(pytorch_dump_folder_path) if repo_id: print("Pushing to the hub...") model.push_to_hub(repo_id) tokenizer.push_to_hub(repo_id) if __name__ == "__main__": parser = argparse.ArgumentParser() 
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument( "--yaml_config_path", required=True, default=None, type=str, help="Path to config.yaml of model to convert" ) parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) args = parser.parse_args() convert_FastSpeech2ConformerModel_checkpoint( args.checkpoint_path, args.yaml_config_path, args.pytorch_dump_folder_path, args.push_to_hub, )
transformers/src/transformers/models/fastspeech2_conformer/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/fastspeech2_conformer/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 3863 }
109
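A minimal standalone sketch of the variance-predictor key renaming performed by convert_espnet_state_dict_to_hf in the FastSpeech2Conformer conversion script above. The example keys are hypothetical ESPnet-style names (already stripped of the "tts.generator.text2mel." prefix), not keys read from a real checkpoint.

import re

def rename_predictor_conv_key(key: str) -> str:
    # "predictor.conv" sub-modules become "conv_layers"; the numeric pair
    # "<block>.<index>" is then rewritten to ".conv" or ".layer_norm" depending on
    # whether the parameter belongs to the Conv1d (index 0) or the LayerNorm (index 2).
    new_key = key.replace(".conv", ".conv_layers")
    pattern = r"(\d)\.(\d)"
    replacement = r"\1.conv" if ("2.weight" not in new_key) and ("2.bias" not in new_key) else r"\1.layer_norm"
    return re.sub(pattern, replacement, new_key)

print(rename_predictor_conv_key("duration_predictor.conv.0.0.weight"))  # duration_predictor.conv_layers.0.conv.weight
print(rename_predictor_conv_key("duration_predictor.conv.0.2.weight"))  # duration_predictor.conv_layers.0.layer_norm.weight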
# coding=utf-8 # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch FLAVA model.""" import collections import math from collections import OrderedDict from dataclasses import dataclass from typing import Any, Dict, List, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int, ) from .configuration_flava import ( FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig, ) logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/flava-full" # Codebook docstring _CHECKPOINT_FOR_CODEBOOK_DOC = "facebook/flava-image-codebook" _CONFIG_CLASS_FOR_IMAGE_MODEL_DOC = "FlavaImageConfig" _CONFIG_CLASS_FOR_TEXT_MODEL_DOC = "FlavaTextConfig" _CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC = "FlavaMultimodalConfig" _EXPECTED_IMAGE_OUTPUT_SHAPE = [1, 197, 768] LOGIT_SCALE_CLAMP_MIN = 0 LOGIT_SCALE_CLAMP_MAX = 4.6052 FlavaPossibleConfigs = Union[FlavaTextConfig, FlavaImageConfig, FlavaMultimodalConfig] @dataclass class FlavaModelOutput(ModelOutput): """ Output from FlavaModel containing embeddings and outputs from individual encoders. Note that `image_embeddings` and `text_embeddigns` returned are similar to pooled output returned from a transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and `text_projection` layers on `image_embeddings` and `text_embeddings` respectively. Args: image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present): The image embeddings which are basically the pooled output of [`FlavaImageModel`]. image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present): The output of the [`FlavaImageModel`]. text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present): The text embeddings which are basically the pooled output of [`FlavaTextModel`]. text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present): The output of the [`FlavaTextModel`]. multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`): The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`]. 
multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`): The output of the [`FlavaMultimodalModel`]. """ image_embeddings: Optional[torch.FloatTensor] = None image_output: Optional[BaseModelOutputWithPooling] = None text_embeddings: Optional[torch.FloatTensor] = None text_output: Optional[BaseModelOutputWithPooling] = None multimodal_embeddings: Optional[torch.FloatTensor] = None multimodal_output: Optional[BaseModelOutputWithPooling] = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_output", "image_output", "multimodal_output"] else getattr(self, k).to_tuple() for k in self.keys() ) @dataclass class FlavaLosses(ModelOutput): """Class representing pretraining losses from FLAVA model Args: mim (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels` and `pixel_values` are present, `input_ids_masked` is absent and `mim_weight` > 0.: Masked Image Modeling loss as used in BeIT calculated only for unimodal image data. mlm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels` and `input_ids_masked` are present, `pixel_values` is absent and `mlm_weight` > 0.: Masked Language Modeling loss as used in BERT calculated only for unimodal text data. itm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `itm_labels`, `input_ids_masked`, `pixel_values` are present and `itm_weight` > 0.: Image Text Matching (ITM) loss calculated for paired image-text data. Note that ITM loss is calculated on masked pairs in FLAVA. global_contrastive (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `input_ids` and `pixel_values` are present and `global_contrastive_weight` > 0.: Contrastive loss for image-text similarity similar to CLIP but calculated globally for paired image-text data. This is calculated on unmasked images and texts. mmm_image (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_image_weight` > 0.: Masked Multimodal Modeling loss's image component calculated on paired image-text data. mmm_text (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_text_weight` > 0.: Masked Multimodal Modeling loss's text component calculated on paired image-text data. """ mim: Optional[torch.FloatTensor] = None mlm: Optional[torch.FloatTensor] = None itm: Optional[torch.FloatTensor] = None global_contrastive: Optional[torch.FloatTensor] = None mmm_image: Optional[torch.FloatTensor] = None mmm_text: Optional[torch.FloatTensor] = None def all_none(self) -> bool: all_none = True for v in self.values(): if v is not None: all_none = False break return all_none @dataclass class FlavaForPreTrainingOutput(ModelOutput): """ Output from FlavaForPreTraining containing embeddings, and outputs from individual encoders. Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and `text_projection` layers on `image_embeddings` and `text_embeddings` respectively. Args: loss (`torch.FloatTensor`, *optional*, returned when `return_loss` is True): Total loss calculated for this model. loss_info (`FlavaLosses`): Detailed info for FLAVA Pretraining losses. 
Check `FlavaLosses` class description for the information on the keys. image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present): The image embeddings which are basically the pooled output of [`FlavaImageModel`]. image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present): The output of the [`FlavaImageModel`]. text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present): The text embeddings which are basically the pooled output of [`FlavaTextModel`]. text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present): The output of the [`FlavaTextModel`]. multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`): The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`]. multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`): The output of the [`FlavaMultimodalModel`]. image_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present): The image embeddings which are basically the pooled output of [`FlavaImageModel`]. Uses `bool_masked_pos` to create masked images. image_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present): The output of the [`FlavaImageModel`]. Uses `bool_masked_pos` to create masked images. text_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids_masked` are present): The text embeddings which are basically the pooled output of [`FlavaTextModel`]. text_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` are present): The output of the [`FlavaTextModel`]. multimodal_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present): The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`]. multimodal_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` and `pixel_values` are present): The output of the [`FlavaMultimodalModel`]. mim_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)` , *optional*, returned when `pixel_values` are present and `input_ids_masked` are not): The logits for MIM unimodal loss. Uses `book_masked_pos` to get masked patches. The flattened output is returned when `bool_masked_pos` has some of the patches masked. mlm_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `input_ids_masked` are present and `pixel_values` are not): The logits for MLM unimodal loss. The flattened output is returned when `input_ids_masked` has some of the tokens masked. itm_logits (`torch.FloatTensor` of shape `(batch_size, 2)`, *optional*, returned when `input_ids_masked` and `pixel_values` are present): The logits for ITM loss. Note that ITM loss is calculated on masked pairs in FLAVA. 
mmm_image_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape`(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present): The logits for MMM image multimodal loss. Uses `book_masked_pos` to get masked patches. The flattened output is returned when `bool_masked_pos` has some of the patches masked. mmm_text_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(`(total_masked_seq_length, text_vocab_size)`), *optional*, returned when `pixel_values` and `input_ids_masked` are present): The logits for MMM text multimodal loss. The flattened output is returned when `input_ids_masked` has some of the tokens masked. contrastive_logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeddings` and `text_embeddings` but passed through FLAVA's `image_projection` and `text_projection` layers respectively. This represents the image-text similarity scores. This is calculated on unmasked images and texts. contrastive_logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeddings` and `image_embeddings` but passed through FLAVA's `text_projection` and `image_projection` layers respectively. This is calculated on unmasked images and texts. """ loss: Optional[torch.FloatTensor] = None loss_info: FlavaLosses = None image_embeddings: Optional[torch.FloatTensor] = None image_output: Optional[BaseModelOutputWithPooling] = None text_embeddings: Optional[torch.FloatTensor] = None text_output: Optional[BaseModelOutputWithPooling] = None multimodal_embeddings: Optional[torch.FloatTensor] = None multimodal_output: Optional[BaseModelOutputWithPooling] = None image_masked_embeddings: Optional[torch.FloatTensor] = None image_masked_output: Optional[BaseModelOutputWithPooling] = None text_masked_embeddings: Optional[torch.FloatTensor] = None text_masked_output: Optional[BaseModelOutputWithPooling] = None multimodal_masked_embeddings: Optional[torch.FloatTensor] = None multimodal_masked_output: Optional[BaseModelOutputWithPooling] = None mim_logits: Optional[torch.FloatTensor] = None mlm_logits: Optional[torch.FloatTensor] = None itm_logits: Optional[torch.FloatTensor] = None contrastive_logits_per_image: Optional[torch.FloatTensor] = None contrastive_logits_per_text: Optional[torch.FloatTensor] = None mmm_image_logits: Optional[torch.FloatTensor] = None mmm_text_logits: Optional[torch.FloatTensor] = None def to_tuple(self) -> Tuple[Any]: transformer_outputs = [ "text_output", "image_output", "multimodal_output", "text_masked_output", "image_masked_output", "multimodal_masked_output", ] return tuple(self[k] if k not in transformer_outputs else getattr(self, k).to_tuple() for k in self.keys()) # Based on timm implementation, which can be found here: # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/image_transformer.py class FlavaImageEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. Optionally, also the mask token. 
""" def __init__(self, config: FlavaImageConfig, use_mask_token: bool = False) -> None: super().__init__() use_mask_token = use_mask_token or config.mask_token self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None self.patch_embeddings = PatchEmbeddings( image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.hidden_size, ) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.patch_size = config.patch_size self.config = config # Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward( self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: bool = False, ) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) batch_size, seq_len, _ = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) # B X H X W = B X HW if bool_masked_pos.dim() == 3: bool_masked_pos = bool_masked_pos.view(bool_masked_pos.size(0), -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask # add the [CLS] token to the embedded patch tokens cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings # Based on 
timm implementation, which can be found here: # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/image_transformer.py class PatchEmbeddings(nn.Module): """ Image to Patch Embedding. """ def __init__( self, image_size: int = 224, patch_size: Union[int, Tuple[int, int]] = 16, num_channels: int = 3, embed_dim: int = 768, ): super().__init__() if not isinstance(image_size, collections.abc.Iterable): image_size = (image_size, image_size) if not isinstance(patch_size, collections.abc.Iterable): patch_size = (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) x = self.projection(pixel_values).flatten(2).transpose(1, 2) return x class FlavaTextEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) def forward( self, input_ids: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, ): input_shape = input_ids.size() seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = 
self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class FlavaSelfAttention(nn.Module): def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class FlavaSelfOutput(nn.Module): """ The residual connection is defined in FlavaLayer (same as ViTLayer) instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class FlavaAttention(nn.Module): def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() self.attention = FlavaSelfAttention(config) self.output = FlavaSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class FlavaIntermediate(nn.Module): def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act # Copied from transformers.models.vit.modeling_vit.ViTIntermediate.forward def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class FlavaOutput(nn.Module): def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) # Copied from transformers.models.vit.modeling_vit.ViTOutput.forward def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class FlavaLayer(nn.Module): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = FlavaAttention(config) self.intermediate = FlavaIntermediate(config) self.output = FlavaOutput(config) # TODO: Check fp32 layer norm possiblity self.layernorm_before = 
nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in ViT, layernorm is applied before self-attention attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states # in ViT, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs class FlavaEncoder(nn.Module): def __init__(self, config: FlavaConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([FlavaLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions ) class FlavaPooler(nn.Module): def __init__(self, config: FlavaPossibleConfigs): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output FLAVA_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`{config}`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ FLAVA_INPUTS_DOCSTRING_COMMON = r""" attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ FLAVA_IMAGE_INPUTS_DOCSTRING_BASE = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`FlavaImageProcessor.__call__`] for details. bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). interpolate_pos_encoding (`bool`, *optional*): Whether to interpolate the pre-trained position encodings. """ FLAVA_IMAGE_INPUTS_DOCSTRING = FLAVA_IMAGE_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON FLAVA_TEXT_INPUTS_DOCSTRING_BASE = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) """ FLAVA_TEXT_INPUTS_DOCSTRING = FLAVA_TEXT_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON FLAVA_MULTIMODAL_INPUTS_DOCSTRING = ( r""" Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, image_num_patches + text_seq_len, hidden_size)`): The concatenated hidden states of unimodal encoders. """ + FLAVA_INPUTS_DOCSTRING_COMMON ) FLAVA_MODEL_INPUTS_DOCSTRING_BASE = r""" Args: skip_multimodal_encoder (*bool*, *optional*): Skip any calculations for multimodal encoder. Useful if multimodal encoding is not going to be used. """ FLAVA_MODEL_INPUTS_DOCSTRING = ( FLAVA_IMAGE_INPUTS_DOCSTRING_BASE + FLAVA_TEXT_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON + FLAVA_MODEL_INPUTS_DOCSTRING_BASE ) FLAVA_PRETRAINING_INPUTS_DOCSTRING = ( r""" Args: input_ids_masked (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. These ones are the masked version of the original task to be used with MLM. 
Indices can be obtained using [`AutoTokenizer`] along with [`DataCollatorForMaskedLanguageModeling`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) """ + FLAVA_TEXT_INPUTS_DOCSTRING_BASE + FLAVA_IMAGE_INPUTS_DOCSTRING_BASE + r""" image_attention_mask (`torch.FloatTensor` of shape `({1})`, *optional*): Mask to avoid performing attention on padding token indices specifically for images. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) skip_unmasked_multimodal_encoder (*bool*, *optional*): Skip any calculations for multimodal encoder for unmasked inputs. FLAVA pretraining doesn't need unmasked multimodal embeddings or outputs as of now. mlm_labels (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*): Labels for computing the left-to-right language and multimodal masked modeling loss (next word prediction). Indices should be in `[-100, 0, ..., text_config.vocab_size - 1]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., text_config.vocab_size - 1]`. mim_labels (`torch.LongTensor` of shape `(batch_size, image_num_patches)`, *optional*): Labels for computing the image and multimodal masked modeling loss. Indices should be in `[-100, 0, ..., image_config.vocab_size - 1]`. Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., image_config.vocab_size - 1]`. If not passed, they are generated automatically using the image codebook assigned to the model. By default, it uses [`FlavaImageCodebook`]. See [`FlavaImageCodebook`] to understand how to generate mim_labels. itm_labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*): Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match. The pairs with 0 will be skipped for calculation of MMM and global contrastive losses as well. return_loss (`bool`, *optional*, default to None): Whether to return calculated loss or not. """ + FLAVA_INPUTS_DOCSTRING_COMMON ) FLAVA_PRETRAINING_START_DOCSTRING_EXTRA = r""" Parameters: image_codebook ([`nn.Module`]): If passed, the image codebook will be set to this. Otherwise. it will be initialized using the image_codebook_config defined in the config first as the first parameter. """ class FlavaPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = FlavaConfig base_model_prefix = "flava" supports_gradient_checkpointing = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @add_start_docstrings( "The bare FLAVA Image Model transformer outputting raw hidden-states without any specific head on top.", FLAVA_START_DOCSTRING.format(config="FlavaImageConfig"), ) class FlavaImageModel(FlavaPreTrainedModel): config_class = FlavaImageConfig # This override allows us to load FlavaImageModel from FlavaModel/FlavaForPreTraining checkpoints. base_model_prefix = "flava.image_model" main_input_name = "pixel_values" def __init__(self, config: FlavaImageConfig, add_pooling_layer: bool = True): super().__init__(config) self.config = config self.embeddings = FlavaImageEmbeddings(config) self.encoder = FlavaEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = FlavaPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self) -> nn.Module: return self.embeddings.patch_embeddings def set_input_embeddings(self, value: nn.Module): self.embeddings.patch_embeddings = value def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_CLASS_FOR_IMAGE_MODEL_DOC, modality="vision", expected_output=_EXPECTED_IMAGE_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: Optional[bool] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding ) encoder_outputs = self.encoder( embedding_output, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The bare FLAVA Text Model transformer outputting raw hidden-states without any specific head on top.", FLAVA_START_DOCSTRING.format(config="FlavaTextConfig"), ) class FlavaTextModel(FlavaPreTrainedModel): config_class = FlavaTextConfig # This override allows us to load FlavaTextModel from FlavaModel/FlavaForPreTraining checkpoints. base_model_prefix = "flava.text_model" def __init__(self, config: FlavaTextConfig, add_pooling_layer: bool = True): super().__init__(config) self.config = config self.embeddings = FlavaTextEmbeddings(config) self.encoder = FlavaEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = FlavaPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self) -> PatchEmbeddings: return self.embeddings.word_embeddings def set_input_embeddings(self, value: nn.Module): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_CLASS_FOR_TEXT_MODEL_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError("You have to specify input_ids") input_shape = input_ids.size() if attention_mask is None: attention_mask = torch.ones(input_shape, device=input_ids.device) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( attention_mask, input_shape, input_ids.device ) embedding_output = self.embeddings( input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The bare FLAVA Multimodal Model transformer outputting raw hidden-states without any specific head on top.", FLAVA_START_DOCSTRING.format(config="FlavaMultimodalConfig"), ) class FlavaMultimodalModel(FlavaPreTrainedModel): config_class = FlavaMultimodalConfig # This override allows us to load FlavaMultimodalModel from FlavaModel/FlavaForPreTraining checkpoints. 
base_model_prefix = "flava.multimodal_model" main_input_name = "hidden_states" def __init__(self, config: FlavaMultimodalConfig, add_pooling_layer=True): super().__init__(config) self.config = config self.use_cls_token = self.config.use_cls_token if self.use_cls_token: self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.encoder = FlavaEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = FlavaPooler(config) if add_pooling_layer else None self.post_init() def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward( FLAVA_MULTIMODAL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC, ) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, seq_length, _ = hidden_states.size() if self.use_cls_token: cls_tokens = self.cls_token.expand(batch_size, -1, -1) hidden_states = torch.cat((cls_tokens, hidden_states), dim=1) seq_length += 1 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=hidden_states.device) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( attention_mask, (batch_size, seq_length), hidden_states.device ) encoder_outputs = self.encoder( hidden_states, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The bare FLAVA Model transformer outputting raw hidden-states without any specific head on top.", FLAVA_START_DOCSTRING.format(config="FlavaConfig"), ) class FlavaModel(FlavaPreTrainedModel): config_class = FlavaConfig def __init__(self, config: FlavaConfig): super().__init__(config) if not 
isinstance(config.text_config, FlavaTextConfig): raise TypeError( "config.text_config is expected to be of type FlavaTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.image_config, FlavaImageConfig): raise TypeError( "config.image_config is expected to be of type FlavaImageConfig but is of type" f" {type(config.image_config)}." ) if not isinstance(config.multimodal_config, FlavaMultimodalConfig): raise TypeError( "config.multimodal_config is expected to be of type FlavaMultimodalConfig but " + f"is of type {type(config.multimodal_config)}." ) text_config = config.text_config image_config = config.image_config multimodal_config = config.multimodal_config self.projection_dim = config.projection_dim self.text_hidden_size = text_config.hidden_size self.image_hidden_size = image_config.hidden_size self.mm_hidden_size = multimodal_config.hidden_size self.text_model = FlavaTextModel(text_config) self.image_model = FlavaImageModel(image_config) self.multimodal_model = FlavaMultimodalModel(multimodal_config) self.image_projection = nn.Linear(self.image_hidden_size, self.projection_dim) self.text_projection = nn.Linear(self.text_hidden_size, self.projection_dim) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.image_to_mm_projection = nn.Linear(self.image_hidden_size, self.mm_hidden_size) self.text_to_mm_projection = nn.Linear(self.text_hidden_size, self.mm_hidden_size) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length")) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`FlavaTextModel`]. Examples: ```python >>> from transformers import AutoProcessor, FlavaModel >>> model = FlavaModel.from_pretrained("{0}") >>> processor = AutoProcessor.from_pretrained("{0}") >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], max_length=77, padding="max_length", return_tensors="pt" ... 
) >>> text_features = model.get_text_features(**inputs) ```""".format(_CHECKPOINT_FOR_DOC) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[0] # last_hidden_state text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches")) def get_image_features( self, pixel_values: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: Optional[bool] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`FlavaImageModel`]. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, FlavaModel >>> model = FlavaModel.from_pretrained("{0}") >>> processor = AutoProcessor.from_pretrained("{0}") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""".format(_CHECKPOINT_FOR_DOC) image_outputs = self.image_model( pixel_values=pixel_values, bool_masked_pos=bool_masked_pos, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) pooled_output = image_outputs[0] # last_hidden_state image_features = self.image_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward( FLAVA_MODEL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len") ) @replace_return_docstrings(output_type=FlavaModelOutput, config_class=FlavaConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, image_attention_mask: Optional[torch.Tensor] = None, skip_multimodal_encoder: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: bool = True, return_dict: Optional[bool] = None, ) -> Union[Tuple, FlavaOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, FlavaModel >>> model = FlavaModel.from_pretrained("facebook/flava-full") >>> processor = AutoProcessor.from_pretrained("facebook/flava-full") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> image_embeddings = outputs.image_embeddings >>> text_embeddings = outputs.text_embeddings >>> multimodal_embeddings = outputs.multimodal_embeddings >>> 
outputs.image_embeddings.shape torch.Size([1, 197, 768]) >>> text_embeddings.shape torch.Size([1, 7, 768]) >>> multimodal_embeddings.shape torch.Size([1, 205, 768]) ``` """ return_dict = return_dict if return_dict is not None else self.config.return_dict if not output_hidden_states: raise ValueError("FLAVA model requires hidden states to work. Please set `output_hidden_states=True`") image_embeddings = None image_states = None image_mm_projection = None image_output = None if pixel_values is not None: image_output = self.image_model( pixel_values=pixel_values, bool_masked_pos=bool_masked_pos, attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeddings, image_states = image_output[0], image_output[2] # Note that these states don't use final layernorm in the transformer model image_mm_projection = self.image_to_mm_projection(image_states[-1]) text_embeddings = None text_states = None text_mm_projection = None text_output = None if input_ids is not None: text_output = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_embeddings, text_states = text_output[0], text_output[2] # Note that these states don't use final layernorm in the transformer model text_mm_projection = self.text_to_mm_projection(text_states[-1]) multimodal_embeddings = None multimodal_output = None if image_mm_projection is not None and text_mm_projection is not None and not skip_multimodal_encoder: if attention_mask is not None: batch_size, seq_len, _ = image_mm_projection.shape if self.multimodal_model.use_cls_token: seq_len += 1 attention_mask_image = torch.ones(batch_size, seq_len, device=image_mm_projection.device) attention_multimodal = torch.cat([attention_mask_image, attention_mask], dim=1) else: attention_multimodal = None multimodal_input = torch.cat([image_mm_projection, text_mm_projection], dim=1) multimodal_output = self.multimodal_model( multimodal_input, attention_mask=attention_multimodal, return_dict=return_dict ) multimodal_embeddings = multimodal_output[0] if not return_dict: return ( image_embeddings, image_output, text_embeddings, text_output, multimodal_embeddings, multimodal_output, ) return FlavaModelOutput( image_embeddings=image_embeddings, image_output=image_output, text_embeddings=text_embeddings, text_output=text_output, multimodal_embeddings=multimodal_embeddings, multimodal_output=multimodal_output, ) class FlavaImageCodebookResPath(nn.Module): def __init__(self, in_size: int, out_size: int, **kwargs): super().__init__() hid_size = out_size // 4 path = OrderedDict() path["relu_1"] = nn.ReLU() path["conv_1"] = nn.Conv2d(in_size, hid_size, kernel_size=3, padding=1) path["relu_2"] = nn.ReLU() path["conv_2"] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1) path["relu_3"] = nn.ReLU() path["conv_3"] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1) path["relu_4"] = nn.ReLU() path["conv_4"] = nn.Conv2d(hid_size, out_size, kernel_size=1, padding=0) self.path = nn.Sequential(path) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.path(x) class FlavaImageCodebookBlock(nn.Module): def __init__(self, in_size: int, out_size: int, num_layers: int, **kwargs): super().__init__() self.post_gain = 1 / (num_layers**2) if in_size != out_size: self.id_path = nn.Conv2d(in_size, out_size, kernel_size=1, 
padding=0) else: self.id_path = nn.Identity() self.res_path = FlavaImageCodebookResPath(in_size, out_size) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.id_path(x) + self.post_gain * self.res_path(x) class FlavaImageCodebookLayerGroup(nn.Module): def __init__(self, num_blocks: int, num_layers: int, in_size: int, out_size: int, use_pool: bool = True): super().__init__() blocks = OrderedDict() for i in range(num_blocks): if i == 0: blocks[f"block_{i+1}"] = FlavaImageCodebookBlock(in_size, out_size, num_layers) else: blocks[f"block_{i+1}"] = FlavaImageCodebookBlock(out_size, out_size, num_layers) if use_pool: blocks["pool"] = nn.MaxPool2d(kernel_size=2) self.group = nn.Sequential(blocks) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.group(x) # Inspired by DALLE Encoder in https://github.com/openai/DALL-E/blob/5be4b236bc3ade6943662354117a0e83752cc322/dall_e/encoder.py#L42 @add_start_docstrings( """ The FLAVA's image codebook model inspired from DALL-E's original encoder. Outputs raw hidden states and can be used to generate image tokens for an image based on DALL-E's vocab. Used to generate labels for MIM. Use `get_codebook_indices` to get image tokens for an image. """, FLAVA_START_DOCSTRING.format(config="FlavaImageCodebookConfig"), ) class FlavaImageCodebook(FlavaPreTrainedModel): base_model_prefix = "" config_class = FlavaImageCodebookConfig main_input_name = "pixel_values" supports_gradient_checkpointing = False def __init__( self, config: FlavaImageCodebookConfig, **kwargs: Any, ): super().__init__(config) self.config = config self.num_groups = config.num_groups self.input_channels = config.input_channels self.num_blocks_per_group = config.num_blocks_per_group self.hidden_size = config.hidden_size self.vocab_size = config.vocab_size num_layers = self.num_groups * self.num_blocks_per_group output_blocks = OrderedDict() output_blocks["relu"] = nn.ReLU() output_blocks["conv"] = nn.Conv2d(8 * self.hidden_size, self.vocab_size, kernel_size=1, padding=0) blocks = OrderedDict() blocks["input"] = nn.Conv2d(self.input_channels, 1 * self.hidden_size, kernel_size=7, padding=3) blocks["group_1"] = FlavaImageCodebookLayerGroup( self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 1 * self.hidden_size ) blocks["group_2"] = FlavaImageCodebookLayerGroup( self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 2 * self.hidden_size ) blocks["group_3"] = FlavaImageCodebookLayerGroup( self.num_blocks_per_group, num_layers, 2 * self.hidden_size, 4 * self.hidden_size ) blocks["group_4"] = FlavaImageCodebookLayerGroup( self.num_blocks_per_group, num_layers, 4 * self.hidden_size, 8 * self.hidden_size, use_pool=False ) blocks["output"] = nn.Sequential(output_blocks) self.blocks = nn.Sequential(blocks) self.post_init() if self.config.freeze: for param in self.parameters(): param.requires_grad = False def get_codebook_indices(self, pixel_values: torch.Tensor) -> torch.Tensor: """ Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoImageProcessor, FlavaImageCodebook >>> model = FlavaImageCodebook.from_pretrained("{0}") >>> image_processor = AutoImageProcessor.from_pretrained("{0}") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt") >>> inputs = dict(pixel_values=inputs.codebook_pixel_values) >>> outputs = model.get_codebook_indices(**inputs) ``` """.format(_CHECKPOINT_FOR_CODEBOOK_DOC) z_logits = self.blocks(pixel_values) return torch.argmax(z_logits, axis=1) def get_codebook_probs(self, pixel_values: torch.Tensor) -> torch.Tensor: z_logits = self.blocks(pixel_values) return nn.Softmax(dim=1)(z_logits) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: """ Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoImageProcessor, FlavaImageCodebook >>> model = FlavaImageCodebook.from_pretrained("{0}") >>> image_processor = AutoImageProcessor.from_pretrained("{0}") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt") >>> inputs = dict(pixel_values=inputs.codebook_pixel_values) >>> outputs = model(**inputs) >>> print(outputs.shape) (1, 196) ``` """.format(_CHECKPOINT_FOR_CODEBOOK_DOC) if len(pixel_values.shape) != 4: raise ValueError(f"input shape {pixel_values.shape} is not 4d") if pixel_values.shape[1] != self.input_channels: raise ValueError(f"input has {pixel_values.shape[1]} channels but model built for {self.input_channels}") return self.blocks(pixel_values) class FlavaPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class FlavaMaskedPredictionHead(nn.Module): def __init__(self, config, weight=None): super().__init__() self.config = config self.transform = FlavaPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) if weight is not None: self.decoder.weight = weight # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, x): x = self.transform(x) x = self.decoder(x) return x class FlavaITMHead(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pooler = FlavaPooler(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, x): x = self.pooler(x) x = 
self.seq_relationship(x) return x class FlavaGlobalContrastiveHead(nn.Module): def __init__(self, config): super().__init__() self.config = config self.global_backprop_contrastive = config.global_backprop_contrastive def forward(self, image_embeddings, text_embeddings, logit_scale): temperature = torch.exp(logit_scale) if not torch.distributed.is_available() or not torch.distributed.is_initialized(): labels = torch.arange(image_embeddings.size(0), device=image_embeddings.device) image_embeddings_all = [image_embeddings] text_embeddings_all = [text_embeddings] else: local_batch_size = image_embeddings.size(0) world_size = torch.distributed.get_world_size() if self.global_backprop_contrastive: # `torch.distributed.nn.functional.all_gather` does backprop on all active workers # whereas `torch.distributed.all_gather` only backpropagates on the current worker. image_embeddings_all = torch.distributed.nn.functional.all_gather(image_embeddings) text_embeddings_all = torch.distributed.nn.functional.all_gather(text_embeddings) else: image_embeddings_all = [torch.zeros_like(text_embeddings) for _ in range(world_size)] text_embeddings_all = [torch.zeros_like(image_embeddings) for _ in range(world_size)] torch.distributed.all_gather(image_embeddings_all, image_embeddings) torch.distributed.all_gather(text_embeddings_all, text_embeddings) labels = local_batch_size * torch.distributed.get_rank() + torch.arange( local_batch_size, device=image_embeddings.device ) image_embeddings_all = torch.cat(image_embeddings_all) text_embeddings_all = torch.cat(text_embeddings_all) logits_per_image = torch.matmul(image_embeddings, text_embeddings_all.transpose(0, 1)) * temperature logits_per_text = torch.matmul(text_embeddings, image_embeddings_all.transpose(0, 1)) * temperature return logits_per_image, logits_per_text, labels @add_start_docstrings( """ The FLAVA model for pretraining which outputs losses, embeddings, logits and transformer outputs.
""", FLAVA_START_DOCSTRING.format(config="FlavaConfig") + FLAVA_PRETRAINING_START_DOCSTRING_EXTRA, ) class FlavaForPreTraining(FlavaPreTrainedModel): # Those are linked to xxx.bias _tied_weights_keys = [ "mmm_text_head.decoder.bias", "mmm_image_head.decoder.bias", "mlm_head.decoder.bias", "mim_head.decoder.bias", ] def __init__(self, config: FlavaConfig, image_codebook: Optional[nn.Module] = None): super().__init__(config) self.flava = FlavaModel(config) self.image_codebook = image_codebook if self.image_codebook is None and config.init_codebook: self.image_codebook = FlavaImageCodebook(config.image_codebook_config) # Levarage text and image encoder configs to create the masked # head since it has the right vocab self.mim_head = FlavaMaskedPredictionHead(config.image_config) self.mlm_head = FlavaMaskedPredictionHead(config.text_config) self.itm_head = FlavaITMHead(config) self.mmm_image_head = FlavaMaskedPredictionHead(config.image_config) self.mmm_text_head = FlavaMaskedPredictionHead(config.text_config) self.global_contrastive_head = FlavaGlobalContrastiveHead(config) self.image_vocab_size = config.image_config.vocab_size self.text_vocab_size = config.text_config.vocab_size self.mlm_weight = config.mlm_weight self.mim_weight = config.mim_weight self.global_contrastive_weight = config.global_contrastive_weight self.ce_ignore_index = config.ce_ignore_index self.itm_weight = config.itm_weight self.mmm_image_weight = config.mmm_image_weight self.mmm_text_weight = config.mmm_text_weight self.skip_unmasked_multimodal_encoder = config.skip_unmasked_multimodal_encoder self.post_init() def _resize_to_2d(self, x: torch.Tensor): if x.dim() > 2: x = x.view(x.size(0), -1) return x @add_start_docstrings_to_model_forward( FLAVA_PRETRAINING_INPUTS_DOCSTRING.format("batch_size, text_seq_len", "batch_size, image_num_patches") ) @replace_return_docstrings(output_type=FlavaForPreTrainingOutput, config_class=FlavaConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, input_ids_masked: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, codebook_pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, image_attention_mask: Optional[torch.Tensor] = None, skip_unmasked_multimodal_encoder: bool = None, mlm_labels: Optional[torch.Tensor] = None, mim_labels: Optional[torch.Tensor] = None, itm_labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: bool = True, return_dict: Optional[bool] = None, return_loss: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], FlavaForPreTrainingOutput]: """ Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import FlavaForPreTraining, AutoProcessor >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> model = FlavaForPreTraining.from_pretrained("facebook/flava-full") >>> processor = AutoProcessor.from_pretrained("facebook/flava-full") >>> text = ["a photo of a cat"] >>> inputs = processor( ... images=[image], ... text=text, ... return_masks=True, ... return_codebook_pixels=True, ... padding=True, ... max_length=77, ... return_tensors="pt", ... 
) >>> output = model(**inputs) ``` Return: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict return_loss = return_loss if return_loss is not None else self.config.return_loss skip_unmasked_multimodal_encoder = ( skip_unmasked_multimodal_encoder if skip_unmasked_multimodal_encoder is not None else self.skip_unmasked_multimodal_encoder ) if input_ids_masked is None and input_ids is not None: logger.warning( "`input_ids_masked` isn't passed which means MLM loss won't be calculated correctlySetting it to" " `input_ids` so that model can work. Please pass it if this is unintentional. This is usually OKAY if" " you are doing inference on unmasked text..." ) input_ids_masked = input_ids flava_output = self.flava( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, image_attention_mask=image_attention_mask, # Don't need unmasked multimodal embedding for anything so skip it # NOTE: ITM uses masked version skip_multimodal_encoder=skip_unmasked_multimodal_encoder, output_attentions=output_attentions, output_hidden_states=output_hidden_states, # Pass true to have deterministic outputs return_dict=True, ) flava_masked_output = self.flava( input_ids=input_ids_masked, pixel_values=pixel_values, attention_mask=attention_mask, token_type_ids=token_type_ids, image_attention_mask=image_attention_mask, bool_masked_pos=bool_masked_pos, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) pos_mask = None image_embeddings = flava_output.image_embeddings text_embeddings = flava_output.text_embeddings image_masked_embeddings = flava_masked_output.image_embeddings text_masked_embeddings = flava_masked_output.text_embeddings multimodal_masked_embeddings = flava_masked_output.multimodal_embeddings total_loss = mim_loss = mlm_loss = mmm_text_loss = mmm_image_loss = gc_loss = itm_loss = None mim_logits = mlm_logits = mmm_text_logits = mmm_image_logits = None itm_logits = logits_per_image = logits_per_text = None # Calculate mim_labels if necessary from the image_codebook if image_masked_embeddings is not None or multimodal_masked_embeddings is not None: if mim_labels is None and return_loss: if self.image_codebook is None: raise RuntimeError( "`return_loss` is set to True but the image codebook is not initialized and no `mim_labels` " " have been passed. Reinstantiate the model with `init_codebook` set to True or " "pass in your custom `mim_labels`" ) if codebook_pixel_values is None: raise ValueError( "`codebook_pixel_value` are required to generate `mim_labels` if loss is expected. 
" "Call `AutoProcessor` with `return_codebook_pixels` set to True" ) mim_labels = self.image_codebook.get_codebook_indices(codebook_pixel_values) # Unimodal MIM Loss # If multimodal embeddings are present, we will calculate MMM loss if self.mim_weight > 0 and image_masked_embeddings is not None and multimodal_masked_embeddings is None: sequence_for_image = image_masked_embeddings if mim_labels is not None: mim_labels = self._resize_to_2d(mim_labels) bool_masked_pos = self._resize_to_2d(bool_masked_pos) mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index sequence_for_image = sequence_for_image[:, -mim_labels.size(1) :, :] masked_tokens = mim_labels.ne(self.ce_ignore_index) mim_labels_filtered = mim_labels[masked_tokens] sequence_for_image = sequence_for_image[masked_tokens, :] mim_logits = self.mim_head(sequence_for_image) if return_loss: mim_loss = nn.functional.cross_entropy( mim_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1) ) mim_loss *= self.mim_weight else: mim_logits = self.mim_head(sequence_for_image) # Unimodal MLM Loss if self.mlm_weight > 0 and text_masked_embeddings is not None and multimodal_masked_embeddings is None: sequence_for_text = text_masked_embeddings if mlm_labels is not None: mlm_labels = self._resize_to_2d(mlm_labels) sequence_for_text = sequence_for_text[:, -mlm_labels.size(1) :, :] masked_tokens = mlm_labels.ne(self.ce_ignore_index) mlm_labels_filtered = mlm_labels[masked_tokens] sequence_for_text = sequence_for_text[masked_tokens, :] mlm_logits = self.mlm_head(sequence_for_text) if return_loss: mlm_loss = nn.functional.cross_entropy( mlm_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1) ) mlm_loss *= self.mlm_weight else: mlm_logits = self.mlm_head(sequence_for_text) # ITM Loss if self.itm_weight > 0 and multimodal_masked_embeddings is not None: itm_logits = self.itm_head(multimodal_masked_embeddings) if itm_labels is not None: pos_pairs = itm_labels.ne(0) pos_mask = torch.where(pos_pairs.any(), pos_pairs, pos_pairs.new([True])) if return_loss: itm_loss = nn.functional.cross_entropy(itm_logits, itm_labels) itm_loss *= self.itm_weight if multimodal_masked_embeddings is not None: multimodal_masked_embeddings = multimodal_masked_embeddings[pos_mask] if mlm_labels is not None: mlm_labels = mlm_labels[pos_mask] if mim_labels is not None: mim_labels = mim_labels[pos_mask] bool_masked_pos = bool_masked_pos[pos_mask] # MMM Image Loss if multimodal_masked_embeddings is not None and self.mmm_image_weight > 0: sequence_for_image = multimodal_masked_embeddings end_index = image_masked_embeddings.size(1) - 1 sequence_for_image = sequence_for_image[:, 2 : 2 + end_index, :] if mim_labels is not None: mim_labels = self._resize_to_2d(mim_labels) bool_masked_pos = self._resize_to_2d(bool_masked_pos) mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index masked_tokens = mim_labels.ne(self.ce_ignore_index) mim_labels_filtered = mim_labels[masked_tokens] sequence_for_image = sequence_for_image[masked_tokens, :] mmm_image_logits = self.mmm_image_head(sequence_for_image) if return_loss: mmm_image_loss = nn.functional.cross_entropy( mmm_image_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1) ) mmm_image_loss *= self.mmm_image_weight else: mmm_image_logits = self.mmm_image_head(sequence_for_image) # MMM Text Loss if multimodal_masked_embeddings is not None and self.mmm_text_weight > 0: sequence_for_text = multimodal_masked_embeddings sequence_for_text = sequence_for_text[:, -text_masked_embeddings.size(1) :, :] if 
mlm_labels is not None: mlm_labels = self._resize_to_2d(mlm_labels) masked_tokens = mlm_labels.ne(self.ce_ignore_index) mlm_labels_filtered = mlm_labels[masked_tokens] sequence_for_text = sequence_for_text[masked_tokens, :] mmm_text_logits = self.mmm_text_head(sequence_for_text) if return_loss: mmm_text_loss = nn.functional.cross_entropy( mmm_text_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1) ) mmm_text_loss *= self.mmm_text_weight else: mmm_text_logits = self.mmm_text_head(sequence_for_text) # Global Contrastive Loss if image_embeddings is not None and text_embeddings is not None and self.global_contrastive_weight > 0: text_embedding = self.flava.text_projection(text_embeddings[:, 0, :]) text_embedding = nn.functional.normalize(text_embedding, dim=-1) image_embedding = self.flava.image_projection(image_embeddings[:, 0, :]) image_embedding = nn.functional.normalize(image_embedding, dim=-1) self.flava.logit_scale.data.clamp_(LOGIT_SCALE_CLAMP_MIN, LOGIT_SCALE_CLAMP_MAX) logits_per_image, logits_per_text, gc_labels = self.global_contrastive_head( image_embedding, text_embedding, self.flava.logit_scale ) # Apply ITM negative mask if any if pos_mask is not None: logits_per_image = logits_per_image[pos_mask] logits_per_text = logits_per_text[pos_mask] gc_labels = gc_labels[pos_mask] if return_loss: gc_loss_image = nn.functional.cross_entropy(logits_per_image, gc_labels) gc_loss_text = nn.functional.cross_entropy(logits_per_text, gc_labels) gc_loss = (gc_loss_image + gc_loss_text) / 2 gc_loss *= self.global_contrastive_weight flava_losses = FlavaLosses( mim=mim_loss, mlm=mlm_loss, itm=itm_loss, global_contrastive=gc_loss, mmm_image=mmm_image_loss, mmm_text=mmm_text_loss, ) if return_loss and not flava_losses.all_none(): total_loss = sum(loss if loss is not None else 0 for loss in flava_losses.values()) if not return_dict: output = ( image_embeddings, flava_output.image_output.to_tuple() if flava_output.image_output is not None else None, text_embeddings, flava_output.text_output.to_tuple() if flava_output.text_output is not None else None, flava_output.multimodal_embeddings, flava_output.multimodal_output.to_tuple() if flava_output.multimodal_output is not None else None, image_masked_embeddings, flava_masked_output.image_output.to_tuple() if flava_masked_output.image_output is not None else None, text_masked_embeddings, flava_masked_output.text_output.to_tuple() if flava_masked_output.text_output is not None else None, multimodal_masked_embeddings, flava_masked_output.multimodal_output.to_tuple() if flava_masked_output.multimodal_output is not None else None, mim_logits, mlm_logits, itm_logits, logits_per_image, logits_per_image, mmm_image_logits, mmm_text_logits, ) if return_loss and not flava_losses.all_none(): output = ( total_loss, flava_losses, ) + output # Filter None as transformer by default won't handle it return tuple(x for x in output if x is None) return FlavaForPreTrainingOutput( loss=total_loss, loss_info=flava_losses, image_embeddings=image_embeddings, image_output=flava_output.image_output, text_embeddings=text_embeddings, text_output=flava_output.text_output, multimodal_embeddings=flava_output.multimodal_embeddings, multimodal_output=flava_output.multimodal_output, image_masked_embeddings=image_masked_embeddings, image_masked_output=flava_masked_output.image_output, text_masked_embeddings=text_masked_embeddings, text_masked_output=flava_masked_output.text_output, multimodal_masked_embeddings=multimodal_masked_embeddings, 
multimodal_masked_output=flava_masked_output.multimodal_output, mim_logits=mim_logits, mlm_logits=mlm_logits, itm_logits=itm_logits, contrastive_logits_per_image=logits_per_image, contrastive_logits_per_text=logits_per_text, mmm_image_logits=mmm_image_logits, mmm_text_logits=mmm_text_logits, ) __all__ = [ "FlavaForPreTraining", "FlavaImageCodebook", "FlavaImageModel", "FlavaModel", "FlavaMultimodalModel", "FlavaPreTrainedModel", "FlavaTextModel", ]
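To make the global contrastive pathway above easier to follow, here is a minimal, self-contained sketch (not the library's own code) of how `FlavaGlobalContrastiveHead`-style logits and labels come together in the single-process case, along with the symmetric cross-entropy used for the global contrastive loss in `FlavaForPreTraining`. The batch size, projection dimension, and the `logit_scale` value below are illustrative assumptions rather than values taken from a FLAVA checkpoint.

```python
import torch
import torch.nn.functional as F


def global_contrastive_logits(image_embeddings, text_embeddings, logit_scale):
    # Single-process equivalent of FlavaGlobalContrastiveHead.forward:
    # each image is scored against every text in the batch (and vice versa),
    # and the matching pairs sit on the diagonal, hence the arange() labels.
    temperature = torch.exp(logit_scale)
    labels = torch.arange(image_embeddings.size(0), device=image_embeddings.device)
    logits_per_image = image_embeddings @ text_embeddings.t() * temperature
    logits_per_text = text_embeddings @ image_embeddings.t() * temperature
    return logits_per_image, logits_per_text, labels


# Stand-ins for the projected [CLS] embeddings; FLAVA L2-normalizes them first.
batch_size, projection_dim = 4, 512  # illustrative sizes
image_embeddings = F.normalize(torch.randn(batch_size, projection_dim), dim=-1)
text_embeddings = F.normalize(torch.randn(batch_size, projection_dim), dim=-1)
logit_scale = torch.tensor(2.6592)  # illustrative value, roughly log(1 / 0.07)

logits_per_image, logits_per_text, labels = global_contrastive_logits(
    image_embeddings, text_embeddings, logit_scale
)
loss = (F.cross_entropy(logits_per_image, labels) + F.cross_entropy(logits_per_text, labels)) / 2
print(logits_per_image.shape, loss.item())  # torch.Size([4, 4]) and a scalar loss
```

In the distributed case the only difference is that the text/image embeddings on the right-hand side of the matmul are gathered from all workers and the labels are offset by the local rank, as the head above does.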
transformers/src/transformers/models/flava/modeling_flava.py/0
{ "file_path": "transformers/src/transformers/models/flava/modeling_flava.py", "repo_id": "transformers", "token_count": 41698 }
110
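The MIM, MLM, and MMM branches of `FlavaForPreTraining` above all follow the same pattern: positions that were not masked receive the ignore index, the hidden states are filtered down to the masked positions, and cross-entropy is computed only there. The snippet below is a hedged, self-contained illustration of that pattern on toy tensors; the vocabulary size, hidden size, and the plain linear prediction head are stand-ins rather than FLAVA's real modules.

```python
import torch
import torch.nn as nn

ce_ignore_index = -100
vocab_size, hidden_size, seq_len = 50, 16, 6          # toy sizes for illustration
prediction_head = nn.Linear(hidden_size, vocab_size)  # stands in for FlavaMaskedPredictionHead

hidden_states = torch.randn(2, seq_len, hidden_size)  # encoder output for the masked input
labels = torch.randint(0, vocab_size, (2, seq_len))   # target ids (image codes or token ids)
bool_masked_pos = torch.zeros(2, seq_len, dtype=torch.bool)
bool_masked_pos[:, 1] = True                          # pretend position 1 was masked everywhere
bool_masked_pos[0, 4] = True                          # and position 4 in the first sample

# Same masking pattern as the MIM / MLM / MMM loss branches:
labels[~bool_masked_pos] = ce_ignore_index            # ignore everything that was not masked
masked_tokens = labels.ne(ce_ignore_index)
labels_filtered = labels[masked_tokens]               # shape: (num_masked,)
sequence_filtered = hidden_states[masked_tokens, :]   # shape: (num_masked, hidden_size)

logits = prediction_head(sequence_filtered)           # shape: (num_masked, vocab_size)
loss = nn.functional.cross_entropy(logits.view(-1, vocab_size), labels_filtered.view(-1))
print(logits.shape, loss.item())
```

Each per-task loss computed this way is then scaled by its configured weight (`mim_weight`, `mlm_weight`, and so on) and summed into the total pretraining loss.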
# coding=utf-8 # Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for FSMT.""" import json import os import re import unicodedata from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "src_vocab_file": "vocab-src.json", "tgt_vocab_file": "vocab-tgt.json", "merges_file": "merges.txt", } def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def replace_unicode_punct(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl """ text = text.replace(",", ",") text = re.sub(r"。\s*", ". ", text) text = text.replace("、", ",") text = text.replace("”", '"') text = text.replace("“", '"') text = text.replace("∶", ":") text = text.replace(":", ":") text = text.replace("?", "?") text = text.replace("《", '"') text = text.replace("》", '"') text = text.replace(")", ")") text = text.replace("!", "!") text = text.replace("(", "(") text = text.replace(";", ";") text = text.replace("1", "1") text = text.replace("」", '"') text = text.replace("「", '"') text = text.replace("0", "0") text = text.replace("3", "3") text = text.replace("2", "2") text = text.replace("5", "5") text = text.replace("6", "6") text = text.replace("9", "9") text = text.replace("7", "7") text = text.replace("8", "8") text = text.replace("4", "4") text = re.sub(r".\s*", ". ", text) text = text.replace("~", "~") text = text.replace("’", "'") text = text.replace("…", "...") text = text.replace("━", "-") text = text.replace("〈", "<") text = text.replace("〉", ">") text = text.replace("【", "[") text = text.replace("】", "]") text = text.replace("%", "%") return text def remove_non_printing_char(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl """ output = [] for char in text: cat = unicodedata.category(char) if cat.startswith("C"): continue output.append(char) return "".join(output) # Porting notes: # this one is modeled after XLMTokenizer # # added: # - src_vocab_file, # - tgt_vocab_file, # - langs, class FSMTTokenizer(PreTrainedTokenizer): """ Construct an FAIRSEQ Transformer tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following: - Moses preprocessing and tokenization. - Normalizing all inputs text. - The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like "__classify__") to a vocabulary. - The argument `langs` defines a pair of languages. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods. Args: langs (`List[str]`, *optional*): A list of two languages to translate from and to, for instance `["en", "ru"]`. src_vocab_file (`str`, *optional*): File containing the vocabulary for the source language. tgt_vocab_file (`st`, *optional*): File containing the vocabulary for the target language. merges_file (`str`, *optional*): File containing the merges. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, langs=None, src_vocab_file=None, tgt_vocab_file=None, merges_file=None, do_lower_case=False, unk_token="<unk>", bos_token="<s>", sep_token="</s>", pad_token="<pad>", **kwargs, ): try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use XLMTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.sm = sacremoses self.src_vocab_file = src_vocab_file self.tgt_vocab_file = tgt_vocab_file self.merges_file = merges_file self.do_lower_case = do_lower_case # cache of sm.MosesPunctNormalizer instance self.cache_moses_punct_normalizer = {} # cache of sm.MosesTokenizer instance self.cache_moses_tokenizer = {} self.cache_moses_detokenizer = {} if langs and len(langs) == 2: self.src_lang, self.tgt_lang = langs else: raise ValueError( f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. " "Usually that means that tokenizer can't find a mapping for the given model path " "in and other maps of this tokenizer." 
) with open(src_vocab_file, encoding="utf-8") as src_vocab_handle: self.encoder = json.load(src_vocab_handle) with open(tgt_vocab_file, encoding="utf-8") as tgt_vocab_handle: tgt_vocab = json.load(tgt_vocab_handle) self.decoder = {v: k for k, v in tgt_vocab.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__( langs=langs, src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, do_lower_case=do_lower_case, unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, **kwargs, ) # hack override def get_vocab(self) -> Dict[str, int]: return self.get_src_vocab() # hack override @property def vocab_size(self) -> int: return self.src_vocab_size def moses_punct_norm(self, text, lang): if lang not in self.cache_moses_punct_normalizer: punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang) self.cache_moses_punct_normalizer[lang] = punct_normalizer return self.cache_moses_punct_normalizer[lang].normalize(text) def moses_tokenize(self, text, lang): if lang not in self.cache_moses_tokenizer: moses_tokenizer = self.sm.MosesTokenizer(lang=lang) self.cache_moses_tokenizer[lang] = moses_tokenizer return self.cache_moses_tokenizer[lang].tokenize( text, aggressive_dash_splits=True, return_str=False, escape=True ) def moses_detokenize(self, tokens, lang): if lang not in self.cache_moses_detokenizer: moses_detokenizer = self.sm.MosesDetokenizer(lang=lang) self.cache_moses_detokenizer[lang] = moses_detokenizer return self.cache_moses_detokenizer[lang].detokenize(tokens) def moses_pipeline(self, text, lang): text = replace_unicode_punct(text) text = self.moses_punct_norm(text, lang) text = remove_non_printing_char(text) return text @property def src_vocab_size(self): return len(self.encoder) @property def tgt_vocab_size(self): return len(self.decoder) def get_src_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def get_tgt_vocab(self): return dict(self.decoder, **self.added_tokens_decoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + "</w>",) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + "</w>" while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) if word == "\n </w>": word = "\n</w>" self.cache[token] = word return word def _tokenize(self, text, lang="en", bypass_tokenizer=False): """ Tokenize a string given language code using Moses. Details of tokenization: - [sacremoses](https://github.com/alvations/sacremoses): port of Moses - Install with `pip install sacremoses` Args: - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported languages. However, we don't enforce it. - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) (bool). 
If True, we only apply BPE. Returns: List of tokens. """ # ignore `lang` which is currently isn't explicitly passed in tokenization_utils.py and always results in lang=en # if lang != self.src_lang: # raise ValueError(f"Expected lang={self.src_lang}, but got {lang}") lang = self.src_lang if self.do_lower_case: text = text.lower() if bypass_tokenizer: text = text.split() else: text = self.moses_pipeline(text, lang=lang) text = self.moses_tokenize(text, lang=lang) split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(" "))) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" # remove BPE tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens] tokens = "".join(tokens).split() # detokenize text = self.moses_detokenize(tokens, self.tgt_lang) return text def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A FAIRSEQ Transformer sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ sep = [self.sep_token_id] # no bos used in fairseq if token_ids_1 is None: return token_ids_0 + sep return token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) # no bos used in fairseq if token_ids_1 is not None: return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ Transformer sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). 
Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An FAIRSEQ_TRANSFORMER sequence pair mask has the following format: """ sep = [self.sep_token_id] # no bos used in fairseq if token_ids_1 is None: return len(token_ids_0 + sep) * [0] return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return src_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["src_vocab_file"] ) tgt_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["tgt_vocab_file"] ) merges_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(src_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") with open(tgt_vocab_file, "w", encoding="utf-8") as f: tgt_vocab = {v: k for k, v in self.decoder.items()} f.write(json.dumps(tgt_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merges_file, "w", encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return src_vocab_file, tgt_vocab_file, merges_file def __getstate__(self): state = self.__dict__.copy() state["sm"] = None return state def __setstate__(self, d): self.__dict__ = d try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use XLMTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.sm = sacremoses __all__ = ["FSMTTokenizer"]
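As a companion to the `bpe` method above, the following is a small self-contained sketch of the same greedy merge loop: adjacent symbol pairs are merged in order of their rank in the merge table until no rankable pair remains, with the last symbol carrying the word-end marker `</w>`. The merge table and tokens here are invented for illustration and are not taken from any real FSMT vocabulary.

```python
def toy_bpe(token, bpe_ranks):
    # Greedy BPE as in FSMTTokenizer.bpe: repeatedly merge the adjacent pair
    # with the lowest rank; the final character carries the "</w>" marker.
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)


# Hypothetical merge table: a lower rank means the pair is merged earlier.
ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2, ("lo", "w</w>"): 3}
print(toy_bpe("low", ranks))    # -> "low</w>"     (fully merged into one symbol)
print(toy_bpe("lower", ranks))  # -> "low er</w>"  (split into two BPE sub-tokens)
```

The real tokenizer additionally caches results per token and special-cases a trailing newline, but the merge loop itself is the part shown here.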
transformers/src/transformers/models/fsmt/tokenization_fsmt.py/0
{ "file_path": "transformers/src/transformers/models/fsmt/tokenization_fsmt.py", "repo_id": "transformers", "token_count": 8672 }
111
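The special-token layout produced by `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` in the tokenizer above can be summarized with a short hedged sketch: no `<s>` is prepended and `</s>` terminates each segment, matching the FAIRSEQ convention. The token ids used below are made up for illustration; in practice they come from the loaded vocabulary.

```python
SEP_TOKEN_ID = 2  # hypothetical id for "</s>"; the real id comes from the vocab file


def build_inputs_with_special_tokens(token_ids_0, token_ids_1=None):
    # FAIRSEQ Transformer layout: "X </s>" for a single sequence,
    # "A </s> B </s>" for a pair. No <s>/bos token is used.
    sep = [SEP_TOKEN_ID]
    if token_ids_1 is None:
        return token_ids_0 + sep
    return token_ids_0 + sep + token_ids_1 + sep


def create_token_type_ids_from_sequences(token_ids_0, token_ids_1=None):
    # Segment A (and its closing </s>) gets type 0, segment B gets type 1.
    sep = [SEP_TOKEN_ID]
    if token_ids_1 is None:
        return [0] * len(token_ids_0 + sep)
    return [0] * len(token_ids_0 + sep) + [1] * len(token_ids_1 + sep)


print(build_inputs_with_special_tokens([11, 12, 13], [21, 22]))
# [11, 12, 13, 2, 21, 22, 2]
print(create_token_type_ids_from_sequences([11, 12, 13], [21, 22]))
# [0, 0, 0, 0, 1, 1, 1]
```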
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch OpenAI GPT-2 model.""" import math import os import warnings from dataclasses import dataclass from typing import Callable, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel, SequenceSummary from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.model_parallel_utils import assert_device_map, get_device_map from .configuration_gpt2 import GPT2Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "openai-community/gpt2" _CONFIG_FOR_DOC = "GPT2Config" def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path): """Load tf checkpoints in a pytorch model""" try: import re import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(gpt2_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array.squeeze()) for name, array in zip(names, arrays): name = name[6:] # skip "model/" name = name.split("/") pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+\d+", m_name): scope_names = re.split(r"(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "w" or scope_names[0] == "g": pointer = getattr(pointer, "weight") elif scope_names[0] == "b": pointer = getattr(pointer, "bias") elif scope_names[0] == "wpe" or scope_names[0] == "wte": pointer = getattr(pointer, scope_names[0]) pointer = getattr(pointer, "weight") else: pointer = getattr(pointer, scope_names[0]) if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] try: if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") except ValueError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model def eager_attention_forward(module, query, key, value, attention_mask, head_mask=None, **kwargs): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if module.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device ) # Layer-wise attention scaling if module.scale_attn_by_inverse_layer_idx: attn_weights = attn_weights / float(module.layer_idx + 1) if not module.is_cross_attention: # if only "normal" attention layer implements causal mask query_length, key_length = query.size(-2), key.size(-2) causal_mask = module.bias[:, :, key_length - query_length : key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device) attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value) if attention_mask is not None: # Apply the attention mask attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise attn_weights = attn_weights.type(value.dtype) attn_weights = module.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights class GPT2Attention(nn.Module): def __init__(self, config, is_cross_attention=False, layer_idx=None): super().__init__() self.config = config max_positions = config.max_position_embeddings self.register_buffer( "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( 1, 1, max_positions, max_positions ), persistent=False, ) self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False) self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.split_size = self.embed_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale_attn_weights = config.scale_attn_weights self.is_cross_attention = is_cross_attention # Layer-wise attention scaling, reordering, and upcasting self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx self.layer_idx = layer_idx self.reorder_and_upcast_attn = config.reorder_and_upcast_attn if self.is_cross_attention: self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) self.q_attn = Conv1D(self.embed_dim, self.embed_dim) else: self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) self.c_proj = Conv1D(self.embed_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) self.is_causal = True self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads) index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)]) # Prune conv1d layers self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) # Update hyper params self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads)) self.num_heads = self.num_heads - len(heads) self.pruned_heads = self.pruned_heads.union(heads) def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None): # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM) bsz, num_heads, q_seq_len, dk = query.size() _, _, k_seq_len, _ = key.size() # Preallocate attn_weights for `baddbmm` attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device) # Compute Scale Factor scale_factor = 1.0 if self.scale_attn_weights: scale_factor /= float(value.size(-1)) ** 0.5 if self.scale_attn_by_inverse_layer_idx: scale_factor /= 
float(self.layer_idx + 1)

        # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
        with torch.amp.autocast(query.device.type, enabled=False):
            q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
            attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
            attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)

        if not self.is_cross_attention:
            # Only the "normal" (self-)attention layer implements the causal mask.
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        if attn_weights.dtype != torch.float32:
            raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)
        attn_output = attn_output.transpose(1, 2)

        return attn_output, attn_weights

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        if encoder_hidden_states is not None:
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
                )

            query_states = self.q_attn(hidden_states)
            key_states, value_states = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
            attention_mask = encoder_attention_mask
        else:
            query_states, key_states, value_states = self.c_attn(hidden_states).split(self.split_size, dim=2)

        shape_q = (*query_states.shape[:-1], -1, self.head_dim)
        shape_kv = (*key_states.shape[:-1], -1, self.head_dim)

        query_states = query_states.view(shape_q).transpose(1, 2)
        key_states = key_states.view(shape_kv).transpose(1, 2)
        value_states = value_states.view(shape_kv).transpose(1, 2)

        if layer_past is not None:
            past_key, past_value = layer_past
            key_states = torch.cat((past_key, key_states), dim=-2)
            value_states = torch.cat((past_value, value_states), dim=-2)

        if use_cache is True:
            present = (key_states, value_states)
        else:
            present = None

        is_cross_attention = encoder_hidden_states is not None
        is_causal = attention_mask is None and query_states.shape[-2] > 1 and not is_cross_attention

        using_eager = self.config._attn_implementation == "eager"
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and (output_attentions or head_mask is not None):
                using_eager = True
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                # Attention functions are consistent with previous equivalent attention classes, however they do not support some options
                # (e.g. layer scaling, head mask) that eager supports. These implementations are thus equivalent to previous code, but
                # not necessarily to eager (if the mentioned options are provided).
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] if using_eager and self.reorder_and_upcast_attn: attn_output, attn_weights = self._upcast_and_reordered_attn( query_states, key_states, value_states, attention_mask, head_mask ) else: attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, head_mask=head_mask, dropout=self.attn_dropout.p if self.training else 0.0, is_causal=is_causal, **kwargs, ) attn_output = attn_output.reshape(*attn_output.shape[:-2], -1).contiguous() attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs # a, present, (attentions) class GPT2MLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = Conv1D(intermediate_size, embed_dim) self.c_proj = Conv1D(embed_dim, intermediate_size) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class GPT2Block(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() hidden_size = config.hidden_size inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPT2Attention(config=config, layer_idx=layer_idx) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: self.crossattention = GPT2Attention(config=config, is_cross_attention=True, layer_idx=layer_idx) self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPT2MLP(inner_dim, config) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn( hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] # output_attn: a, present, (attentions) outputs = attn_outputs[1:] # residual connection hidden_states = attn_output + residual if encoder_hidden_states is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_outputs = self.crossattention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) attn_output = 
cross_attn_outputs[0] # residual connection hidden_states = residual + attn_output outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) # residual connection hidden_states = residual + feed_forward_hidden_states if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs # hidden_states, present, (attentions, cross_attentions) class GPT2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GPT2Config load_tf_weights = load_tf_weights_in_gpt2 base_model_prefix = "transformer" is_parallelizable = True supports_gradient_checkpointing = True _no_split_modules = ["GPT2Block"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True _supports_sdpa = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, Conv1D)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. # > -- GPT-2 :: https://openai.com/blog/better-language-models/ # # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py for name, p in module.named_parameters(): if name == "c_proj.weight": # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer))) @dataclass class GPT2DoubleHeadsModelOutput(ModelOutput): """ Base class for outputs of models predicting if two sentences are consecutive or not. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided): Multiple choice classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). 
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    mc_loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    mc_logits: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


GPT2_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`GPT2Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

GPT2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
            `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input
            sequence tokens in the vocabulary.

            If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
            `input_ids`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
            Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
            `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
            their past given to this model should not be passed as `input_ids` as they have already been computed.
        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for
            `past_key_values`.
In other words, the `attention_mask` always has to have the length:
            `len(past_key_values) + len(input_ids)`

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.

            If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
            `past_key_values`).
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is subject to change at a moment's notice.

    Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
    it will evenly distribute blocks across all devices.

    Args:
        device_map (`Dict[int, list]`, *optional*):
            A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
            automatically mapped to the first device (for esoteric reasons). That means that the first device should
            have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the
            following number of attention modules:

                - openai-community/gpt2: 12
                - openai-community/gpt2-medium: 24
                - openai-community/gpt2-large: 36
                - openai-community/gpt2-xl: 48

    Example:

    ```python
    # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:
    model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-xl")
    device_map = {
        0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
        1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
        2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],
        3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47],
    }
    model.parallelize(device_map)
    ```
"""

DEPARALLELIZE_DOCSTRING = r"""
    Moves the model to CPU from a model parallel state.
Example: ```python # On a 4 GPU machine with openai-community/gpt2-large: model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-large") device_map = { 0: [0, 1, 2, 3, 4, 5, 6, 7], 1: [8, 9, 10, 11, 12, 13, 14, 15], 2: [16, 17, 18, 19, 20, 21, 22, 23], 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], } model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() ``` """ @add_start_docstrings( "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.", GPT2_START_DOCSTRING, ) class GPT2Model(GPT2PreTrainedModel): _supports_param_buffer_assignment = False def __init__(self, config): super().__init__(config) self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False self._attn_implementation = config._attn_implementation # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): # Check validity of device_map warnings.warn( "`GPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your" " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1," " ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.h)) self.model_parallel = True self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) self.last_device = "cuda:" + str(max(self.device_map.keys())) self.wte = self.wte.to(self.first_device) self.wpe = self.wpe.to(self.first_device) # Load onto devices for k, v in self.device_map.items(): for block in v: cuda_device = "cuda:" + str(k) self.h[block] = self.h[block].to(cuda_device) # ln_f to last self.ln_f = self.ln_f.to(self.last_device) @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.model_parallel = False self.device_map = None self.first_device = "cpu" self.last_device = "cpu" self.wte = self.wte.to("cpu") self.wpe = self.wpe.to("cpu") for index in range(len(self.h)): self.h[index] = self.h[index].to("cpu") self.ln_f = self.ln_f.to("cpu") torch.cuda.empty_cache() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds # Attention mask. _use_sdpa = self._attn_implementation == "sdpa" and output_attentions is False and head_mask is None attention_mask = attention_mask.view(batch_size, -1) if attention_mask is not None else None if self._attn_implementation == "flash_attention_2": attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif _use_sdpa: attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask=attention_mask, input_shape=(batch_size, input_shape[-1]), inputs_embeds=inputs_embeds, past_key_values_length=past_length, ) else: if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. 
# Sizes are [batch_size, 1, 1, to_seq_length]
                # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
                # This attention mask is simpler than the triangular masking of causal attention
                # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
                attention_mask = attention_mask[:, None, None, :]

                # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
                # masked positions, this operation will create a tensor which is 0.0 for
                # positions we want to attend and the dtype's smallest value for masked positions.
                # Since we are adding it to the raw scores before the softmax, this is
                # effectively the same as removing these entirely.
                attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
                attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            if _use_sdpa:
                encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
                    mask=encoder_attention_mask, dtype=inputs_embeds.dtype, tgt_len=input_shape[-1]
                )
            elif not self._attn_implementation == "flash_attention_2":
                encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
) use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for i in range(len(self.h)): block, layer_past = self.h[i], past_key_values[i] # Model parallel if self.model_parallel: torch.cuda.set_device(hidden_states.device) # Ensure layer_past is on same device as hidden_states (might not be correct) if layer_past is not None: layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past) # Ensure that attention_mask is always on the same device as hidden_states if attention_mask is not None: attention_mask = attention_mask.to(hidden_states.device) if isinstance(head_mask, torch.Tensor): head_mask = head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, ) else: outputs = block( hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) # Model Parallel: If it's the last layer for that device, put things on the next device if self.model_parallel: for k, v in self.device_map.items(): if i == v[-1] and "cuda:" + str(k) != self.last_device: hidden_states = hidden_states.to("cuda:" + str(k + 1)) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) @add_start_docstrings( """ The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, ) class GPT2LMHeadModel(GPT2PreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`GPT2LMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load" " your model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':" " 0, 'transformer.h.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.transformer.h)) self.transformer.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.transformer.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.transformer.deparallelize() self.transformer = self.transformer.to("cpu") self.lm_head = self.lm_head.to("cpu") self.model_parallel = False torch.cuda.empty_cache() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.transformer.first_device) hidden_states = hidden_states.to(self.lm_head.weight.device) lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions, ) @staticmethod def _reorder_cache( past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor ) -> Tuple[Tuple[torch.Tensor]]: """ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. """ return tuple( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) for layer_past in past_key_values ) @add_start_docstrings( """ The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). """, GPT2_START_DOCSTRING, ) class GPT2DoubleHeadsModel(GPT2PreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) config.num_labels = 1 self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.multiple_choice_head = SequenceSummary(config) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`GPT2DoubleHeadsModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should" " load your model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your" " own `device_map` but it needs to be a dictionary module_name to device, so for instance" " {'transformer.h.0': 0, 'transformer.h.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.transformer.h)) self.transformer.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.transformer.first_device) self.multiple_choice_head = self.multiple_choice_head.to(self.transformer.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.transformer.deparallelize() self.transformer = self.transformer.to("cpu") self.lm_head = self.lm_head.to("cpu") self.multiple_choice_head = self.multiple_choice_head.to("cpu") self.model_parallel = False torch.cuda.empty_cache() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, mc_token_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, mc_labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple, GPT2DoubleHeadsModelOutput]: r""" mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - 1]`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]` mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above) Return: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, GPT2DoubleHeadsModel >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> model = GPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2") >>> # Add a [CLS] to the vocabulary (we should train it also!) 
>>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"}) >>> # Update the model embeddings with the new vocabulary size >>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] >>> encoded_choices = [tokenizer.encode(s) for s in choices] >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2 >>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1 >>> outputs = model(input_ids, mc_token_ids=mc_token_ids) >>> lm_logits = outputs.logits >>> mc_logits = outputs.mc_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.transformer.first_device) hidden_states = hidden_states.to(self.lm_head.weight.device) lm_logits = self.lm_head(hidden_states) mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1) mc_loss = None if mc_labels is not None: loss_fct = CrossEntropyLoss() mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)) lm_loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits, mc_logits) + transformer_outputs[1:] if mc_loss is not None: output = (mc_loss,) + output return ((lm_loss,) + output) if lm_loss is not None else output return GPT2DoubleHeadsModelOutput( loss=lm_loss, mc_loss=mc_loss, logits=lm_logits, mc_logits=mc_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def _reorder_cache( past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor ) -> Tuple[Tuple[torch.Tensor]]: """ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. """ return tuple( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) for layer_past in past_key_values ) @add_start_docstrings( """ The GPT2 Model transformer with a sequence classification head on top (linear layer). [`GPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, GPT2_START_DOCSTRING, ) class GPT2ForSequenceClassification(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPT2Model(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint="microsoft/DialogRPT-updown", output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] assert ( self.config.pad_token_id is not None or batch_size == 1 ), "Cannot handle batch sizes > 1 if no padding token is defined." if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ GPT2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, GPT2_START_DOCSTRING, ) class GPT2ForTokenClassification(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPT2Model(config) if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None: classifier_dropout = config.classifier_dropout elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) # fmt: off @add_code_sample_docstrings( checkpoint="brad1141/gpt2-finetuned-comp2", output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_loss=0.25, expected_output=[ "Lead", "Lead", "Lead", "Position", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead", ], ) # fmt: on def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + transformer_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@add_start_docstrings(
    """
    The GPT-2 Model transformer with a span classification head on top for extractive question-answering tasks like
    SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    GPT2_START_DOCSTRING,
)
class GPT2ForQuestionAnswering(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPT2Model(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        real_checkpoint=_CHECKPOINT_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
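
        Example (an illustrative sketch: when loading the base `openai-community/gpt2` checkpoint, the span head
        `qa_outputs` is randomly initialized, so the predicted span is only meaningful after fine-tuning on a QA
        dataset):

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, GPT2ForQuestionAnswering

        >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        >>> model = GPT2ForQuestionAnswering.from_pretrained("openai-community/gpt2")

        >>> question, context = "What is the capital of France?", "The capital of France is Paris."
        >>> inputs = tokenizer(question, context, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> start_index = int(outputs.start_logits.argmax(dim=-1))
        >>> end_index = int(outputs.end_logits.argmax(dim=-1))
        >>> answer = tokenizer.decode(inputs["input_ids"][0, start_index : end_index + 1])
        ```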
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "GPT2DoubleHeadsModel", "GPT2ForQuestionAnswering", "GPT2ForSequenceClassification", "GPT2ForTokenClassification", "GPT2LMHeadModel", "GPT2Model", "GPT2PreTrainedModel", "load_tf_weights_in_gpt2", ]
transformers/src/transformers/models/gpt2/modeling_gpt2.py/0
{ "file_path": "transformers/src/transformers/models/gpt2/modeling_gpt2.py", "repo_id": "transformers", "token_count": 33083 }
112
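The GPT-2 modeling file above closes with the span-classification head (`GPT2ForQuestionAnswering`). As a quick illustration of how that head is driven, here is a minimal, hedged sketch: the base `openai-community/gpt2` checkpoint carries no trained `qa_outputs` weights, so that layer is randomly initialized and the decoded span is only meaningful after fine-tuning.

```python
import torch
from transformers import AutoTokenizer, GPT2ForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = GPT2ForQuestionAnswering.from_pretrained("openai-community/gpt2")  # qa_outputs is newly initialized

question = "What does GPT-2 predict?"
context = "GPT-2 predicts the next token in a sequence."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Greedy span extraction from the start/end logits computed in forward().
start = outputs.start_logits.argmax(dim=-1).item()
end = outputs.end_logits.argmax(dim=-1).item()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))

# Passing start_positions/end_positions returns the averaged cross-entropy loss
# computed in the forward pass above.
train_out = model(**inputs, start_positions=torch.tensor([start]), end_positions=torch.tensor([end]))
print(train_out.loss)
```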
# coding=utf-8 # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for GPTNeoX.""" from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} class GPTNeoXTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" GPT-NeoX-20B tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import GPTNeoXTokenizerFast >>> tokenizer = GPTNeoXTokenizerFast.from_pretrained("openai-community/gpt2") >>> tokenizer("Hello world")["input_ids"] [15496, 995] >>> tokenizer(" Hello world")["input_ids"] [18435, 995] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. unk_token (`str`, *optional*, defaults to `<|endoftext|>`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `<|endoftext|>`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `<|endoftext|>`): The end of sequence token. pad_token (`str`, *optional*): Token for padding a sequence. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (GPTNeoX tokenizer detect beginning of words by the preceding space). add_bos_token (`bool`, *optional*, defaults to `False`): Whether or not to add a `bos_token` at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether or not to add an `eos_token` at the end of sequences. trim_offsets (`bool`, *optional*, defaults to `True`): Whether or not the post-processing step should trim offsets to avoid including whitespaces. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", pad_token=None, add_bos_token=False, add_eos_token=False, add_prefix_space=False, **kwargs, ): super().__init__( vocab_file=vocab_file, merges_file=merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, add_prefix_space=add_prefix_space, **kwargs, ) self._add_bos_token = add_bos_token self._add_eos_token = add_eos_token self.update_post_processor() @property def add_eos_token(self): return self._add_eos_token @property def add_bos_token(self): return self._add_bos_token @add_eos_token.setter def add_eos_token(self, value): self._add_eos_token = value self.update_post_processor() @add_bos_token.setter def add_bos_token(self, value): self._add_bos_token = value self.update_post_processor() # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.update_post_processor def update_post_processor(self): """ Updates the underlying post processor with the current `bos_token` and `eos_token`. """ bos = self.bos_token bos_token_id = self.bos_token_id if bos is None and self.add_bos_token: raise ValueError("add_bos_token = True but bos_token = None") eos = self.eos_token eos_token_id = self.eos_token_id if eos is None and self.add_eos_token: raise ValueError("add_eos_token = True but eos_token = None") single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}" pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}" special_tokens = [] if self.add_bos_token: special_tokens.append((bos, bos_token_id)) if self.add_eos_token: special_tokens.append((eos, eos_token_id)) self._tokenizer.post_processor = processors.TemplateProcessing( single=single, pair=pair, special_tokens=special_tokens ) # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id return ( bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + bos_token_id + ([0] * len(token_ids_1)) + eos_token_id ) # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.build_inputs_with_special_tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) __all__ = ["GPTNeoXTokenizerFast"]
transformers/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py/0
{ "file_path": "transformers/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py", "repo_id": "transformers", "token_count": 3720 }
113
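A short sketch of the `add_bos_token`/`add_eos_token` toggles wired through `update_post_processor()` in the GPT-NeoX tokenizer above. The `EleutherAI/gpt-neox-20b` checkpoint is the usual home of this tokenizer and is assumed to be reachable; exact token ids depend on that vocabulary.

```python
from transformers import GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")

# No special tokens are added by default.
print(tokenizer("Hello world")["input_ids"])

# Assigning the property re-runs update_post_processor(), so every encoding
# from now on ends with the EOS id -- no need to rebuild the tokenizer.
tokenizer.add_eos_token = True
print(tokenizer("Hello world")["input_ids"])  # last id == tokenizer.eos_token_id
```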
# coding=utf-8 # Copyright 2024 IBM and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...cache_utils import Cache, DynamicCache from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...processing_utils import Unpack from ...utils import LossKwargs, logging from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaModel from .configuration_granite import GraniteConfig logger = logging.get_logger(__name__) class GraniteAttention(LlamaAttention): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: GraniteConfig, layer_idx: Optional[int] = None): super().__init__(config, layer_idx) self.scaling = config.attention_multiplier class GraniteDecoderLayer(LlamaDecoderLayer): def __init__(self, config: GraniteConfig, layer_idx: int): super().__init__(config, layer_idx) self.residual_multiplier = config.residual_multiplier self.self_attn = GraniteAttention(config=config, layer_idx=layer_idx) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. 
kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states * self.residual_multiplier # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states * self.residual_multiplier # main diff with Llama outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs class GraniteModel(LlamaModel): def __init__(self, config: GraniteConfig): super().__init__(config) self.embedding_multiplier = config.embedding_multiplier self.layers = nn.ModuleList( [GraniteDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **flash_attn_kwargs: Unpack[FlashAttentionKwargs], ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) inputs_embeds = inputs_embeds * self.embedding_multiplier # main diff with Llama if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers[: self.config.num_hidden_layers]: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **flash_attn_kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) output = BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns, ) return output if return_dict else output.to_tuple() class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ... 
class GraniteForCausalLM(LlamaForCausalLM): def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[KwargsForCausalLM], ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) logits = logits / self.config.logits_scaling # main diff with Llama loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/granite/modular_granite.py/0
{ "file_path": "transformers/src/transformers/models/granite/modular_granite.py", "repo_id": "transformers", "token_count": 5382 }
114
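The modular Granite file above differs from Llama only in a handful of scalars (`embedding_multiplier`, `residual_multiplier`, `attention_multiplier`, `logits_scaling`). The sketch below shows where those scalars surface at run time; the `ibm-granite/granite-3.0-2b-instruct` checkpoint name is an assumption used purely for illustration, and any `GraniteForCausalLM` checkpoint should behave the same way.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ibm-granite/granite-3.0-2b-instruct"  # assumed checkpoint name
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

# The config carries the Granite-specific scalars used in the code above:
# embeddings are scaled on the way in, residual branches are damped, and the
# logits are divided by logits_scaling before the loss/softmax.
cfg = model.config
print(cfg.embedding_multiplier, cfg.residual_multiplier, cfg.attention_multiplier, cfg.logits_scaling)

inputs = tokenizer("The capital of France is", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```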
# coding=utf-8 # Copyright 2024 The Kyutai and HuggingFace Inc. teams. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...configuration_utils import PretrainedConfig class HeliumConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`HeliumModel`]. It is used to instantiate an Helium model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Helium 2b model. e.g. [kyutai/helium-2b](https://huggingface.co/kyutai/helium-2b) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 48000): Vocabulary size of the Helium model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`HeliumModel`] hidden_size (`int`, *optional*, defaults to 2560): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 7040): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 20): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 20): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `num_attention_heads`. head_dim (`int`, *optional*, defaults to 128): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The legacy activation function. It is overwritten by the `hidden_activation`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-08): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. 
tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 100000.0): The base period of the RoPE embeddings. pad_token_id (`int`, *optional*, defaults to 3): Padding token id. eos_token_id (`int` | `list`, *optional*, defaults to 2): End of stream token id. bos_token_id (`int`, *optional*, defaults to 1): Beginning of stream token id. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. mlp_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers. ```python >>> from transformers import HeliumModel, HeliumConfig >>> # Initializing a Helium 2b style configuration >>> configuration = HeliumConfig() >>> # Initializing a model from the Helium 2b style configuration >>> model = HeliumModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "helium" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=48000, hidden_size=2560, intermediate_size=7040, num_hidden_layers=24, num_attention_heads=20, num_key_value_heads=20, head_dim=128, hidden_act="silu", attention_dropout=0.0, max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps=1e-8, use_cache=True, tie_word_embeddings=False, rope_theta=100000.0, pad_token_id=3, eos_token_id=2, bos_token_id=1, attention_bias=False, mlp_bias=False, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.head_dim = head_dim self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.mlp_bias = mlp_bias super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) __all__ = ["HeliumConfig"]
transformers/src/transformers/models/helium/configuration_helium.py/0
{ "file_path": "transformers/src/transformers/models/helium/configuration_helium.py", "repo_id": "transformers", "token_count": 2623 }
115
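Beyond the default 2b-style configuration documented above, the same class can describe much smaller variants, which is handy for unit tests or architecture experiments. The sizes below are arbitrary and only illustrate how grouped-query attention and the RoPE base are expressed; the resulting model is randomly initialized.

```python
from transformers import HeliumConfig, HeliumForCausalLM

tiny_config = HeliumConfig(
    hidden_size=512,
    intermediate_size=1408,
    num_hidden_layers=4,
    num_attention_heads=8,
    num_key_value_heads=2,  # GQA: 4 query heads share each key/value head
    head_dim=64,
    max_position_embeddings=2048,
    rope_theta=100000.0,
)

model = HeliumForCausalLM(tiny_config)
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")
```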
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Idefics2 model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) class Idefics2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Idefics2VisionModel`]. It is used to instantiate a Idefics2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics2 model [HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation for initializing all weight matrices in the model. 
Example: ```python >>> from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer >>> from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig >>> # Initializing a Idefics2VisionConfig with google/siglip-base-patch16-224 style configuration >>> configuration = Idefics2VisionConfig() >>> # Initializing a Idefics2VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration >>> model = Idefics2VisionTransformer(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "idefics2_vision" base_config_key = "vision_config" def __init__( self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act="gelu_pytorch_tanh", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range class Idefics2PerceiverConfig(PretrainedConfig): r""" Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the perceiver block. hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. resampler_n_latents (`int`, *optional*, defaults to 64): Number of latent embeddings to resample ("compress") the input sequence to (usually < 128). resampler_depth (`int`, *optional*, defaults to 3): Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (<= 3). resampler_n_heads (`int`, *optional*, defaults to 16): Number of heads in each Transformer block (for multi-headed self-attention). resampler_head_dim (`int`, *optional*, defaults to 96): Dimensionality of each head projection in the Transformer block. num_key_value_heads (`int`, *optional*, defaults to 4): Number of key-value heads in the perceiver attention block. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
""" model_type = "idefics2_perceiver" def __init__( self, hidden_act="silu", hidden_size=4096, rms_norm_eps=1e-06, resampler_n_latents=64, resampler_depth=3, resampler_n_heads=16, resampler_head_dim=96, num_key_value_heads=4, attention_dropout=0.0, **kwargs, ): self.hidden_act = hidden_act self.hidden_size = hidden_size self.rms_norm_eps = rms_norm_eps self.resampler_n_latents = resampler_n_latents self.resampler_depth = resampler_depth self.resampler_n_heads = resampler_n_heads self.num_key_value_heads = num_key_value_heads self.resampler_head_dim = resampler_head_dim self.attention_dropout = attention_dropout if self.num_key_value_heads > self.resampler_n_heads: raise ValueError( f"num_key_value_heads={self.num_key_value_heads} must be less than or equal to" f" resampler_n_heads={self.resampler_n_heads}" ) super().__init__(**kwargs) class Idefics2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Idefics2Model`]. It is used to instantiate a Idefics2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the Idefics2 [HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should cache the key/value pairs of the attention mechanism. image_token_id (`int`, *optional*, defaults to 32001): The id of the "image" token. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to tie the word embeddings with the token embeddings. 
vision_config (`IdeficsVisionConfig` or `dict`, *optional*): Custom vision config or dict perceiver_config (`IdeficsPerceiverConfig` or `dict`, *optional*): Custom perceiver config or dict text_config (`MistralConfig` or `dict`, *optional*): Custom text config or dict for the text model Example: ```python >>> from transformers import Idefics2Model, Idefics2Config >>> # Initializing configuration >>> configuration = Idefics2Config() >>> # Initializing a model from the configuration >>> model = Idefics2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "idefics2" sub_configs = { "text_config": AutoConfig, "perceiver_config": Idefics2PerceiverConfig, "vision_config": Idefics2VisionConfig, } def __init__( self, use_cache=True, image_token_id=32_001, tie_word_embeddings=False, vision_config=None, perceiver_config=None, text_config=None, **kwargs, ): self.image_token_id = image_token_id self.use_cache = use_cache self.tie_word_embeddings = tie_word_embeddings if perceiver_config is None: self.perceiver_config = Idefics2PerceiverConfig() logger.info("perciver_config is None, using default perceiver config") elif isinstance(perceiver_config, dict): self.perceiver_config = Idefics2PerceiverConfig(**perceiver_config) elif isinstance(perceiver_config, Idefics2PerceiverConfig): self.perceiver_config = perceiver_config if vision_config is None: self.vision_config = Idefics2VisionConfig() logger.info("vision_config is None, using default vision config") elif isinstance(vision_config, dict): self.vision_config = Idefics2VisionConfig(**vision_config) elif isinstance(vision_config, Idefics2VisionConfig): self.vision_config = vision_config if isinstance(text_config, dict): text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "mistral" text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: logger.info("text_config is None, using default text config") text_config = CONFIG_MAPPING["mistral"]( max_position_embeddings=4096 * 8, rms_norm_eps=1e-5, # None in the original configuration_mistral, we set it to the unk_token_id pad_token_id=0, tie_word_embeddings=False, ) self.text_config = text_config if self.text_config.hidden_size != self.perceiver_config.hidden_size: self.perceiver_config.hidden_size = self.text_config.hidden_size self.perceiver_config.rms_norm_eps = self.text_config.rms_norm_eps logger.warning_once( "Perceiver config has a different `hidden_size` than text config, which means default values were used. " "In your model's config on the hub, add `hidden_size` and `rms_norm_eps` keys under the `perceiver_config` dict. " ) super().__init__(**kwargs, tie_word_embeddings=tie_word_embeddings) __all__ = ["Idefics2Config"]
transformers/src/transformers/models/idefics2/configuration_idefics2.py/0
{ "file_path": "transformers/src/transformers/models/idefics2/configuration_idefics2.py", "repo_id": "transformers", "token_count": 4695 }
116
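The three config classes above compose into one `Idefics2Config`. A hedged sketch of that composition follows: sub-configs can be passed as objects or plain dicts, anything omitted falls back to the defaults (SigLIP-style vision tower, Mistral text backbone), and the consistency check in `__init__` keeps the perceiver's hidden size aligned with the text model's. The vision sizes chosen here are arbitrary.

```python
from transformers import Idefics2Config
from transformers.models.idefics2.configuration_idefics2 import (
    Idefics2PerceiverConfig,
    Idefics2VisionConfig,
)

config = Idefics2Config(
    vision_config=Idefics2VisionConfig(image_size=384, patch_size=14),  # arbitrary sizes
    perceiver_config=Idefics2PerceiverConfig(resampler_n_latents=64),
    text_config={"model_type": "mistral"},                              # dict form is also accepted
)

print(config.perceiver_config.hidden_size == config.text_config.hidden_size)  # True
```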
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from shutil import copyfile from typing import Optional, Tuple from tokenizers import processors from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging from ...utils.versions import require_version require_version("tokenizers>=0.13.3") if is_sentencepiece_available(): from .tokenization_llama import LlamaTokenizer else: LlamaTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"} B_INST, E_INST = "[INST]", "[/INST]" B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n" # fmt: off DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \ answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\ that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \ correct. If you don't know the answer to a question, please don't share false information.""" # fmt: on class LlamaTokenizerFast(PreTrainedTokenizerFast): """ Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. This uses notably ByteFallback and no normalization. ```python >>> from transformers import LlamaTokenizerFast >>> tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer") >>> tokenizer.encode("Hello this is a test") [1, 15043, 445, 338, 263, 1243] ``` If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the values of the first token and final token of an encoded sequence will not be correct). For more details, checkout [post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that contains the vocabulary necessary to instantiate a tokenizer. tokenizer_file (`str`, *optional*): [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`): The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`): The end of sequence token. add_bos_token (`bool`, *optional*, defaults to `True`): Whether or not to add an `bos_token` at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether or not to add an `eos_token` at the end of sequences. use_default_system_prompt (`bool`, *optional*, defaults to `False`): Whether or not the default system prompt for Llama should be used legacy (`bool`, *optional*): Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622 and #25224 which includes fixes to properly handle tokens that appear after special tokens. Make sure to also set `from_slow` to `True`. A simple example: - `legacy=True`: ```python >>> from transformers import LlamaTokenizerFast >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True) >>> tokenizer.encode("Hello <s>.") # 869 is '▁.' [1, 15043, 29871, 1, 869] ``` - `legacy=False`: ```python >>> from transformers import LlamaTokenizerFast >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True) >>> tokenizer.encode("Hello <s>.") # 29889 is '.' [1, 15043, 29871, 1, 29889] ``` Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details. add_prefix_space (`bool`, *optional*): Whether or not the tokenizer should automatically add a prefix space """ vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = LlamaTokenizer padding_side = "left" model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token="<unk>", bos_token="<s>", eos_token="</s>", add_bos_token=True, add_eos_token=False, use_default_system_prompt=False, legacy=None, add_prefix_space=None, **kwargs, ): if legacy is None: logger.warning_once( f"You are using the default legacy behaviour of the {self.__class__}. This is" " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you." " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it" " means, and thoroughly read the reason why this was added as explained in" " https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file" " you can ignore this message." 
) legacy = True self.legacy = legacy if add_prefix_space is not None: kwargs["from_slow"] = True super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, use_default_system_prompt=use_default_system_prompt, add_prefix_space=add_prefix_space, legacy=legacy, **kwargs, ) self._add_bos_token = add_bos_token self._add_eos_token = add_eos_token self.update_post_processor() self.use_default_system_prompt = use_default_system_prompt self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def update_post_processor(self): """ Updates the underlying post processor with the current `bos_token` and `eos_token`. """ bos = self.bos_token bos_token_id = self.bos_token_id if bos is None and self.add_bos_token: raise ValueError("add_bos_token = True but bos_token = None") eos = self.eos_token eos_token_id = self.eos_token_id if eos is None and self.add_eos_token: raise ValueError("add_eos_token = True but eos_token = None") single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}" pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}" special_tokens = [] if self.add_bos_token: special_tokens.append((bos, bos_token_id)) if self.add_eos_token: special_tokens.append((eos, eos_token_id)) self._tokenizer.post_processor = processors.TemplateProcessing( single=single, pair=pair, special_tokens=special_tokens ) @property def add_eos_token(self): return self._add_eos_token @property def add_bos_token(self): return self._add_bos_token @add_eos_token.setter def add_eos_token(self, value): self._add_eos_token = value self.update_post_processor() @add_bos_token.setter def add_bos_token(self, value): self._add_bos_token = value self.update_post_processor() def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) # TODO ArthurZ let's rely on the template processor instead, refactor all fast tokenizers # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output __all__ = ["LlamaTokenizerFast"]
transformers/src/transformers/models/llama/tokenization_llama_fast.py/0
{ "file_path": "transformers/src/transformers/models/llama/tokenization_llama_fast.py", "repo_id": "transformers", "token_count": 4488 }
117
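A small usage sketch for the fast Llama tokenizer above, reusing the `hf-internal-testing/llama-tokenizer` checkpoint referenced in its docstring. It shows the `add_bos_token`/`add_eos_token` properties rebuilding the template post-processor on assignment, which is exactly what `update_post_processor()` implements.

```python
from transformers import LlamaTokenizerFast

tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")

# add_bos_token defaults to True, so <s> (id 1) is prepended.
print(tokenizer.encode("Hello this is a test"))  # [1, 15043, 445, 338, 263, 1243]

# Assigning the property triggers update_post_processor(), so subsequent
# encodings also end with </s> (id 2).
tokenizer.add_eos_token = True
print(tokenizer.encode("Hello this is a test"))
```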
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the # Lxmert Authors. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 LXMERT model.""" from __future__ import annotations import warnings from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, get_initializer, keras, keras_serializable, shape_list, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_lxmert import LxmertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased" _CONFIG_FOR_DOC = "LxmertConfig" @dataclass class TFLxmertModelOutput(ModelOutput): """ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, visual, and, cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship" encoder") Args: language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the language encoder. vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the visual encoder. pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. The Linear language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ language_output: tf.Tensor | None = None vision_output: tf.Tensor | None = None pooled_output: tf.Tensor | None = None language_hidden_states: Tuple[tf.Tensor] | None = None vision_hidden_states: Tuple[tf.Tensor] | None = None language_attentions: Tuple[tf.Tensor] | None = None vision_attentions: Tuple[tf.Tensor] | None = None cross_encoder_attentions: Tuple[tf.Tensor] | None = None @dataclass class TFLxmertForPreTrainingOutput(ModelOutput): """ Output type of [`LxmertForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score (`tf.Tensor` of shape `(batch_size, 2)`): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). question_answering_score (`tf.Tensor` of shape `(batch_size, n_qa_answers)`): Prediction scores of question answering objective (classification). language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None prediction_logits: tf.Tensor | None = None cross_relationship_score: tf.Tensor | None = None question_answering_score: tf.Tensor | None = None language_hidden_states: Tuple[tf.Tensor] | None = None vision_hidden_states: Tuple[tf.Tensor] | None = None language_attentions: Tuple[tf.Tensor] | None = None vision_attentions: Tuple[tf.Tensor] | None = None cross_encoder_attentions: Tuple[tf.Tensor] | None = None class TFLxmertVisualFeatureEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) # Object feature encoding self.visn_fc = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="visn_fc", ) self.visn_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="visn_layer_norm") # Box position encoding self.box_fc = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="box_fc", ) self.box_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.feat_dim = config.visual_feat_dim self.pos_dim = config.visual_pos_dim self.config = config def call(self, visn_input, training=False): feats, boxes = visn_input x = self.visn_fc(feats) x = self.visn_layer_norm(x) y = self.box_fc(boxes) y = self.box_layer_norm(y) output = (x + y) / 2 output = self.dropout(output, training=training) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "visn_fc", None) is not None: with tf.name_scope(self.visn_fc.name): self.visn_fc.build([None, None, self.feat_dim]) if getattr(self, "visn_layer_norm", None) is not None: with tf.name_scope(self.visn_layer_norm.name): self.visn_layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "box_fc", None) is not None: with tf.name_scope(self.box_fc.name): self.box_fc.build([None, None, self.pos_dim]) if getattr(self, "box_layer_norm", None) is not None: with tf.name_scope(self.box_layer_norm.name): self.box_layer_norm.build([None, None, self.config.hidden_size]) class TFLxmertEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.hidden_size], 
initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False): """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFLxmertAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query", ) self.key = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key", ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value", ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.ctx_dim = config.hidden_size self.config = config def transpose_for_scores(self, x, batch_size): # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, hidden_states, context, attention_mask, output_attentions, training=False): batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(context) mixed_value_layer = self.value(context) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in the TFLxmertModel call() function) attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.ctx_dim]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.ctx_dim]) class TFLxmertIntermediate(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFLxmertOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFLxmertAttentionOutput(keras.layers.Layer): def __init__(self, config, **kwargs):
super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFLxmertSelfAttentionLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self = TFLxmertAttention(config, name="self") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call(self, input_tensor, attention_mask, output_attentions, training=False): # Self attention attends to itself, thus keys and queries are the same (input_tensor). self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions, training=training) if output_attentions: attention_probs = self_output[1] attention_output = self.attention_output(self_output[0], input_tensor, training=training) return (attention_output, attention_probs) if output_attentions else (attention_output,) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self", None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, "attention_output", None) is not None: with tf.name_scope(self.attention_output.name): self.attention_output.build(None) class TFLxmertCrossAttentionLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.att = TFLxmertAttention(config, name="att") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call( self, input_tensor, ctx_tensor, ctx_att_mask, output_attentions=False, training=False, ): output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training) if output_attentions: attention_probs = output[1] attention_output = self.attention_output(output[0], input_tensor, training=training) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "att", None) is not None: with tf.name_scope(self.att.name): self.att.build(None) if getattr(self, "attention_output", None) is not None: with tf.name_scope(self.attention_output.name): self.attention_output.build(None) class TFLxmertLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.attention = TFLxmertSelfAttentionLayer(config, name="attention") self.intermediate = TFLxmertIntermediate(config, name="intermediate") self.transformer_output = TFLxmertOutput(config, name="output") def call(self, hidden_states, attention_mask, output_attentions, training=False): attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training) attention_output = attention_outputs[0] intermediate_output =
self.intermediate(attention_output) layer_output = self.transformer_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "transformer_output", None) is not None: with tf.name_scope(self.transformer_output.name): self.transformer_output.build(None) class TFLxmertXLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention") # Self-attention Layers self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att") self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att") # Intermediate and Output Layers (FFNs) self.lang_inter = TFLxmertIntermediate(config, name="lang_inter") self.lang_output = TFLxmertOutput(config, name="lang_output") self.visn_inter = TFLxmertIntermediate(config, name="visn_inter") self.visn_output = TFLxmertOutput(config, name="visn_output") def cross_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, output_attentions, training=False, ): # Cross Attention # Keras saving and loading model *does not work* with the same inputs for two layers. lang_attention_lang_input = tf.identity(lang_input) visn_attention_lang_input = tf.identity(lang_input) lang_attention_visn_input = tf.identity(visn_input) visn_attention_visn_input = tf.identity(visn_input) lang_att_output = self.visual_attention( lang_attention_lang_input, lang_attention_visn_input, visn_attention_mask, output_attentions=output_attentions, training=training, ) visn_att_output = self.visual_attention( visn_attention_visn_input, visn_attention_lang_input, lang_attention_mask, output_attentions=output_attentions, training=training, ) return lang_att_output, visn_att_output def self_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, training=False, ): # Self Attention output_attentions = False lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training) visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training) return lang_att_output[0], visn_att_output[0] def output_fc(self, lang_input, visn_input, training=False): # FC layers lang_inter_output = self.lang_inter(lang_input) visn_inter_output = self.visn_inter(visn_input) # Layer output lang_output = self.lang_output(lang_inter_output, lang_input, training) visn_output = self.visn_output(visn_inter_output, visn_input, training) return lang_output, visn_output def call( self, lang_feats, lang_attention_mask, visn_feats, visn_attention_mask, output_attentions, training=False, ): lang_att_output = lang_feats visn_att_output = visn_feats lang_att_output, visn_att_output = self.cross_att( lang_att_output, lang_attention_mask, visn_att_output, visn_attention_mask, output_attentions, training=training, ) attention_probs = lang_att_output[1:] lang_att_output, visn_att_output = self.self_att( lang_att_output[0], lang_attention_mask, visn_att_output[0], visn_attention_mask, training=training, ) lang_output, visn_output = self.output_fc(lang_att_output, 
visn_att_output, training=training) return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "visual_attention", None) is not None: with tf.name_scope(self.visual_attention.name): self.visual_attention.build(None) if getattr(self, "lang_self_att", None) is not None: with tf.name_scope(self.lang_self_att.name): self.lang_self_att.build(None) if getattr(self, "visn_self_att", None) is not None: with tf.name_scope(self.visn_self_att.name): self.visn_self_att.build(None) if getattr(self, "lang_inter", None) is not None: with tf.name_scope(self.lang_inter.name): self.lang_inter.build(None) if getattr(self, "lang_output", None) is not None: with tf.name_scope(self.lang_output.name): self.lang_output.build(None) if getattr(self, "visn_inter", None) is not None: with tf.name_scope(self.visn_inter.name): self.visn_inter.build(None) if getattr(self, "visn_output", None) is not None: with tf.name_scope(self.visn_output.name): self.visn_output.build(None) class TFLxmertEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc") # Number of layers self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers # Layers # Using self.layer instead of self.l_layer to support loading BERT weights. self.layer = [TFLxmertLayer(config, name=f"layer_._{i}") for i in range(self.num_l_layers)] self.x_layers = [TFLxmertXLayer(config, name=f"x_layers_._{i}") for i in range(self.num_x_layers)] self.r_layers = [TFLxmertLayer(config, name=f"r_layers_._{i}") for i in range(self.num_r_layers)] self.config = config def call( self, lang_feats=None, lang_attention_mask=None, visual_feats=None, visual_pos=None, visual_attention_mask=None, output_attentions=None, training=False, ): vision_hidden_states = () language_hidden_states = () vision_attentions = () if output_attentions or self.config.output_attentions else None language_attentions = () if output_attentions or self.config.output_attentions else None cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None visual_feats = self.visn_fc([visual_feats, visual_pos], training=training) # Run language layers for layer_module in self.layer: l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training) lang_feats = l_outputs[0] language_hidden_states = language_hidden_states + (lang_feats,) if language_attentions is not None: language_attentions = language_attentions + (l_outputs[1],) # Run relational layers for layer_module in self.r_layers: v_outputs = layer_module( visual_feats, visual_attention_mask, output_attentions, training=training, ) visual_feats = v_outputs[0] vision_hidden_states = vision_hidden_states + (visual_feats,) if vision_attentions is not None: vision_attentions = vision_attentions + (v_outputs[1],) # Run cross-modality layers for layer_module in self.x_layers: x_outputs = layer_module( lang_feats, lang_attention_mask, visual_feats, visual_attention_mask, output_attentions, training=training, ) lang_feats, visual_feats = x_outputs[:2] vision_hidden_states = vision_hidden_states + (visual_feats,) language_hidden_states = language_hidden_states + (lang_feats,) if cross_encoder_attentions is not None: cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],) visual_encoder_outputs = ( 
vision_hidden_states, vision_attentions if output_attentions else None, ) lang_encoder_outputs = ( language_hidden_states, language_attentions if output_attentions else None, ) return ( visual_encoder_outputs, lang_encoder_outputs, cross_encoder_attentions if output_attentions else None, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "visn_fc", None) is not None: with tf.name_scope(self.visn_fc.name): self.visn_fc.build(None) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) if getattr(self, "x_layers", None) is not None: for layer in self.x_layers: with tf.name_scope(layer.name): layer.build(None) if getattr(self, "r_layers", None) is not None: for layer in self.r_layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFLxmertMainLayer(keras.layers.Layer): config_class = LxmertConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFLxmertEmbeddings(config, name="embeddings") self.encoder = TFLxmertEncoder(config, name="encoder") self.pooler = TFLxmertPooler(config, name="pooler") def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call( self, input_ids=None, visual_feats=None, visual_pos=None, attention_mask=None, visual_attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if visual_pos is None or visual_feats is None: raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) # Positional Word Embeddings embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training) # We create a broadcastable 4D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # This attention mask is simpler than the triangular masking of causal attention # used in OpenAI GPT; we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely.
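# Worked example of the masking arithmetic below (a minimal sketch): a mask row [1, 1, 0] # becomes (1.0 - [1, 1, 0]) * -10000.0 = [0.0, 0.0, -10000.0], so adding it to the raw scores # drives the padded position's post-softmax attention weight to ~0.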
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) if visual_attention_mask is not None: extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1) extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype) extended_visual_attention_mask = tf.multiply( tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst ) else: extended_visual_attention_mask = None # Run Lxmert encoder encoder_outputs = self.encoder( embedding_output, extended_attention_mask, visual_feats, visual_pos, extended_visual_attention_mask, output_attentions, training, ) visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2] vision_hidden_states = visual_encoder_outputs[0] language_hidden_states = lang_encoder_outputs[0] all_attentions = () if output_attentions: language_attentions = lang_encoder_outputs[1] vision_attentions = visual_encoder_outputs[1] cross_encoder_attentions = encoder_outputs[2] all_attentions = ( language_attentions, vision_attentions, cross_encoder_attentions, ) hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else () visual_output = vision_hidden_states[-1] lang_output = language_hidden_states[-1] pooled_output = self.pooler(lang_output) if not return_dict: return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions return TFLxmertModelOutput( pooled_output=pooled_output, language_output=lang_output, vision_output=visual_output, language_hidden_states=language_hidden_states if output_hidden_states else None, vision_hidden_states=vision_hidden_states if output_hidden_states else None, language_attentions=language_attentions if output_attentions else None, vision_attentions=vision_attentions if output_attentions else None, cross_encoder_attentions=cross_encoder_attentions if output_attentions else None, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFLxmertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LxmertConfig base_model_prefix = "lxmert" @property def dummy_inputs(self): """ Dummy inputs to build the network.
Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) return { "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, } @property def input_signature(self): return { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), "visual_feats": tf.TensorSpec((None, None, self.config.visual_feat_dim), tf.float32, name="visual_feats"), "visual_pos": tf.TensorSpec((None, None, 4), tf.float32, name="visual_pos"), "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"), "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), } LXMERT_START_DOCSTRING = r""" The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual Genome, using a combination of masked language modeling, region-of-interest feature regression, cross-entropy losses for question answering and attribute prediction, and object tag prediction. This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`LxmertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
""" LXMERT_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) visual_feats (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`): This input represents visual features. They ROI pooled object features from bounding boxes using a faster-RCNN model) These are currently not provided by the transformers library. visual_pos (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`): This input represents spacial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spacial features to be normalized bounding boxes on a scale of 0 to 1. These are currently not provided by the transformers library. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): MMask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.", LXMERT_START_DOCSTRING, ) class TFLxmertModel(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.lxmert = TFLxmertMainLayer(config, name="lxmert") @unpack_inputs @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLxmertModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, visual_feats: tf.Tensor | None = None, visual_pos: tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, visual_attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[Tuple, TFLxmertModelOutput]: outputs = self.lxmert( input_ids, visual_feats, visual_pos, attention_mask, visual_attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict, training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "lxmert", None) is not None: with tf.name_scope(self.lxmert.name): self.lxmert.build(None) class TFLxmertPooler(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert class TFLxmertPredictionHeadTransform(keras.layers.Layer): def __init__(self, config: LxmertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert class TFLxmertLMPredictionHead(keras.layers.Layer): def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
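# Sketch of the weight tying performed in call() below: logits = reshape(hidden_states) @ E^T + bias, # where E is the shared word-embedding matrix of shape (vocab_size, hidden_size).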
self.input_embeddings = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) def get_output_embeddings(self) -> keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert class TFLxmertMLMHead(keras.layers.Layer): def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) class TFLxmertPreTrainingHeads(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") self.seq_relationship = keras.layers.Dense( 2, kernel_initializer=get_initializer(config.initializer_range), name="seq_relationship", ) self.config = config def call(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) if getattr(self, "seq_relationship", None) is not None: with tf.name_scope(self.seq_relationship.name): self.seq_relationship.build([None, None, self.config.hidden_size]) class TFLxmertVisualAnswerHead(keras.layers.Layer): def __init__(self, config, num_labels, **kwargs): super().__init__(**kwargs) hid_dim = config.hidden_size self.dense = keras.layers.Dense( hid_dim * 2, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._0", ) self.activation = get_tf_activation("gelu") self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2") self.dense_1 = keras.layers.Dense( num_labels, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._3", ) self.hid_dim = hid_dim def call(self, hidden_states): 
hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.dense_1(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.hid_dim]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, self.hid_dim * 2]) if getattr(self, "dense_1", None) is not None: with tf.name_scope(self.dense_1.name): self.dense_1.build([None, None, self.hid_dim * 2]) class TFLxmertVisualObjHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # Decide the use of visual losses visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels} if config.visual_attr_loss: visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels} if config.visual_feat_loss: visual_losses["feat"] = {"shape": (-1, config.visual_feat_dim), "num": config.visual_feat_dim} self.visual_losses = visual_losses # One decoder head per enabled visual loss, mapping the transformed hidden states # to that loss's output dimensionality. self.decoder_dict = { key: keras.layers.Dense( self.visual_losses[key]["num"], kernel_initializer=get_initializer(config.initializer_range), name=f"decoder_dict.{key}", ) for key in self.visual_losses } self.config = config def call(self, hidden_states): hidden_states = self.transform(hidden_states) output = {} for key in self.visual_losses: output[key] = self.decoder_dict[key](hidden_states) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) if getattr(self, "decoder_dict", None) is not None: for layer in self.decoder_dict.values(): with tf.name_scope(layer.name): layer.build([None, None, self.config.hidden_size]) @add_start_docstrings("""Lxmert Model with a `language modeling` head on top.""", LXMERT_START_DOCSTRING) class TFLxmertForPreTraining(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.num_qa_labels = config.num_qa_labels self.visual_loss_normalizer = config.visual_loss_normalizer # Use of pretraining tasks self.task_mask_lm = config.task_mask_lm self.task_obj_predict = config.task_obj_predict self.task_matched = config.task_matched self.task_qa = config.task_qa # Lxmert backbone self.lxmert = TFLxmertMainLayer(config, name="lxmert") # Pre-training heads self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls") if self.task_obj_predict: self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head") if self.task_qa: self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head") # Loss functions self.loss_fcts = { "l2": keras.losses.Huber(delta=1.0, name="huber_loss"), "visn_ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True), "ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True), } visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = { "shape": (-1,), "num": config.num_object_labels, "loss": "visn_ce", } if config.visual_attr_loss: visual_losses["attr"] = { "shape": (-1,), "num":
config.num_attr_labels, "loss": "visn_ce", } if config.visual_feat_loss: visual_losses["feat"] = { "shape": (-1, config.visual_feat_dim), "num": config.visual_feat_dim, "loss": "l2", } self.visual_losses = visual_losses @property def dummy_inputs(self): """ Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) if self.config.task_obj_predict: obj_labels = {} if self.config.visual_attr_loss and self.config.task_obj_predict: obj_labels["attr"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_feat_loss and self.config.task_obj_predict: obj_labels["feat"] = ( tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_obj_loss and self.config.task_obj_predict: obj_labels["obj"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) return { **{ "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, }, **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}), } def get_lm_head(self): return self.cls.predictions def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.cls.name + "/" + self.cls.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, visual_feats: tf.Tensor | None = None, visual_pos: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, visual_attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, masked_lm_labels: tf.Tensor | None = None, obj_labels: Dict[str, Tuple[tf.Tensor, tf.Tensor]] | None = None, matched_label: tf.Tensor | None = None, ans: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> Tuple[tf.Tensor] | TFLxmertForPreTrainingOutput: r""" masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` obj_labels (`Dict[str, Tuple[tf.Tensor, tf.Tensor]]`, *optional*): each key is named after one of the visual losses, and the two elements of each tuple are of shape `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)`, holding the label ids and the label scores respectively matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing whether or not the text input matches the image (classification) loss.
Input should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`: - 0 indicates that the sentence does not match the image, - 1 indicates that the sentence does match the image. ans (`tf.Tensor` of shape `(batch_size,)`, *optional*): the index of the correct answer, used as the target of a sparse categorical cross-entropy loss Returns: """ lxmert_output = self.lxmert( input_ids, visual_feats, visual_pos, attention_mask, visual_attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict, training, ) lang_output, visual_output, pooled_output = ( lxmert_output[0], lxmert_output[1], lxmert_output[2], ) lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output) if self.task_qa: answer_score = self.answer_head(pooled_output) else: answer_score = pooled_output[0][0] total_loss = ( None if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None) else tf.constant(0.0) ) losses = () if masked_lm_labels is not None and self.task_mask_lm: masked_lm_loss = self.loss_fcts["ce"]( tf.reshape(masked_lm_labels, [-1]), tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]), ) total_loss += masked_lm_loss losses += (masked_lm_loss,) if matched_label is not None and self.task_matched: matched_loss = self.loss_fcts["ce"]( tf.reshape(matched_label, [-1]), tf.reshape(cross_relationship_score, [-1, 2]), ) total_loss += matched_loss losses += (matched_loss,) if obj_labels is not None and self.task_obj_predict: total_visn_loss = 0.0 visn_prediction_scores_dict = self.obj_predict_head(visual_output) for key, key_info in self.visual_losses.items(): label, mask_conf = obj_labels[key] output_dim = key_info["num"] loss_fct_name = key_info["loss"] label_shape = key_info["shape"] weight = self.visual_loss_normalizer visn_loss_fct = self.loss_fcts[loss_fct_name] visn_prediction_scores = visn_prediction_scores_dict[key] visn_loss = visn_loss_fct( tf.reshape(label, label_shape), tf.reshape(visn_prediction_scores, [-1, output_dim]), ) if visn_loss.ndim > 1: # Regression Losses visn_loss = tf.reduce_mean(visn_loss) visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight total_visn_loss += visn_loss losses += (visn_loss,) total_loss += total_visn_loss if ans is not None and self.task_qa: answer_loss = self.loss_fcts["ce"]( tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels]) ) # exclude "*2" here to match the effect of QA losses. # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs.
# (Used 10 instead of 6 in the EMNLP paper.) # Now: (loss *1) for 12 epochs. # (A "*2" factor was previously applied because more than half of the data has no answer label.) total_loss += answer_loss losses += (answer_loss,) if not return_dict: output = ( lang_prediction_scores, cross_relationship_score, answer_score, ) + lxmert_output[3:] return ((total_loss,) + output) if total_loss is not None else output return TFLxmertForPreTrainingOutput( loss=total_loss, prediction_logits=lang_prediction_scores, cross_relationship_score=cross_relationship_score, question_answering_score=answer_score, language_hidden_states=lxmert_output.language_hidden_states, vision_hidden_states=lxmert_output.vision_hidden_states, language_attentions=lxmert_output.language_attentions, vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "lxmert", None) is not None: with tf.name_scope(self.lxmert.name): self.lxmert.build(None) if getattr(self, "cls", None) is not None: with tf.name_scope(self.cls.name): self.cls.build(None) if getattr(self, "obj_predict_head", None) is not None: with tf.name_scope(self.obj_predict_head.name): self.obj_predict_head.build(None) if getattr(self, "answer_head", None) is not None: with tf.name_scope(self.answer_head.name): self.answer_head.build(None) __all__ = [ "TFLxmertForPreTraining", "TFLxmertMainLayer", "TFLxmertModel", "TFLxmertPreTrainedModel", "TFLxmertVisualFeatureEncoder", ]
transformers/src/transformers/models/lxmert/modeling_tf_lxmert.py/0
{ "file_path": "transformers/src/transformers/models/lxmert/modeling_tf_lxmert.py", "repo_id": "transformers", "token_count": 31887 }
118
# coding=utf-8 # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mask2Former model configuration""" from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class Mask2FormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Mask2FormerModel`]. It is used to instantiate a Mask2Former model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mask2Former [facebook/mask2former-swin-small-coco-instance](https://huggingface.co/facebook/mask2former-swin-small-coco-instance) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Currently, Mask2Former only supports the [Swin Transformer](swin) as backbone. Args: backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `SwinConfig()`): The configuration of the backbone model. If unset, the configuration corresponding to `swin-base-patch4-window12-384` will be used. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, defaults to `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. feature_size (`int`, *optional*, defaults to 256): The features (channels) of the resulting feature maps. mask_feature_size (`int`, *optional*, defaults to 256): The masks' features size, this value will also be used to specify the Feature Pyramid Network features' size. hidden_dim (`int`, *optional*, defaults to 256): Dimensionality of the encoder layers. encoder_feedforward_dim (`int`, *optional*, defaults to 1024): Dimension of feedforward network for deformable detr encoder used as part of pixel decoder. activation_function (`str`, *optional*, defaults to `"relu"`): Activation function used in the deformable detr encoder of the pixel decoder. encoder_layers (`int`, *optional*, defaults to 6): Number of layers in the deformable detr encoder used as part of pixel decoder. decoder_layers (`int`, *optional*, defaults to 10): Number of layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer. dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder. dim_feedforward (`int`, *optional*, defaults to 2048): Feature dimension in feedforward network for transformer decoder. pre_norm (`bool`, *optional*, defaults to `False`): Whether to use pre-LayerNorm or not for transformer decoder. enforce_input_projection (`bool`, *optional*, defaults to `False`): Whether to add an input projection 1x1 convolution even if the input channels and hidden dim are identical in the Transformer decoder. common_stride (`int`, *optional*, defaults to 4): Parameter used for determining number of FPN levels used as part of pixel decoder. ignore_value (`int`, *optional*, defaults to 255): Category id to be ignored during training. num_queries (`int`, *optional*, defaults to 100): Number of queries for the decoder. no_object_weight (`float`, *optional*, defaults to 0.1): The weight to apply to the null (no object) class. class_weight (`float`, *optional*, defaults to 2.0): The weight for the cross entropy loss. mask_weight (`float`, *optional*, defaults to 5.0): The weight for the mask loss. dice_weight (`float`, *optional*, defaults to 5.0): The weight for the dice loss. train_num_points (`int`, *optional*, defaults to 12544): Number of points used for sampling during loss calculation. oversample_ratio (`float`, *optional*, defaults to 3.0): Oversampling parameter used for calculating the number of sampled points. importance_sample_ratio (`float`, *optional*, defaults to 0.75): Ratio of points that are sampled via importance sampling. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1.0): The scaling factor used for the Xavier initialization gain in the HM Attention map module. use_auxiliary_loss (`bool`, *optional*, defaults to `True`): If `True`, [`Mask2FormerForUniversalSegmentationOutput`] will contain the auxiliary losses computed using the logits from each decoder's stage. feature_strides (`List[int]`, *optional*, defaults to `[4, 8, 16, 32]`): Feature strides corresponding to features generated from backbone network. output_auxiliary_logits (`bool`, *optional*): Should the model output its `auxiliary_logits` or not.
Examples: ```python >>> from transformers import Mask2FormerConfig, Mask2FormerModel >>> # Initializing a Mask2Former facebook/mask2former-swin-small-coco-instance configuration >>> configuration = Mask2FormerConfig() >>> # Initializing a model (with random weights) from the facebook/mask2former-swin-small-coco-instance style configuration >>> model = Mask2FormerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "mask2former" backbones_supported = ["swin"] attribute_map = {"hidden_size": "hidden_dim"} def __init__( self, backbone_config: Optional[Dict] = None, feature_size: int = 256, mask_feature_size: int = 256, hidden_dim: int = 256, encoder_feedforward_dim: int = 1024, activation_function: str = "relu", encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0, dim_feedforward: int = 2048, pre_norm: bool = False, enforce_input_projection: bool = False, common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 12544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True, feature_strides: List[int] = [4, 8, 16, 32], output_auxiliary_logits: Optional[bool] = None, backbone: Optional[str] = None, use_pretrained_backbone: bool = False, use_timm_backbone: bool = False, backbone_kwargs: Optional[Dict] = None, **kwargs, ): if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.") backbone_config = CONFIG_MAPPING["swin"]( image_size=224, num_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"], ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.pop("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) # verify that the backbone is supported if backbone_config is not None and backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
" f"Supported model types: {','.join(self.backbones_supported)}" ) self.backbone_config = backbone_config self.feature_size = feature_size self.mask_feature_size = mask_feature_size self.hidden_dim = hidden_dim self.encoder_feedforward_dim = encoder_feedforward_dim self.activation_function = activation_function self.encoder_layers = encoder_layers self.decoder_layers = decoder_layers self.num_attention_heads = num_attention_heads self.dropout = dropout self.dim_feedforward = dim_feedforward self.pre_norm = pre_norm self.enforce_input_projection = enforce_input_projection self.common_stride = common_stride self.ignore_value = ignore_value self.num_queries = num_queries self.no_object_weight = no_object_weight self.class_weight = class_weight self.mask_weight = mask_weight self.dice_weight = dice_weight self.train_num_points = train_num_points self.oversample_ratio = oversample_ratio self.importance_sample_ratio = importance_sample_ratio self.init_std = init_std self.init_xavier_std = init_xavier_std self.use_auxiliary_loss = use_auxiliary_loss self.feature_strides = feature_strides self.output_auxiliary_logits = output_auxiliary_logits self.num_hidden_layers = decoder_layers self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs super().__init__(**kwargs) @classmethod def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs): """Instantiate a [`Mask2FormerConfig`] (or a derived class) from a pre-trained backbone model configuration. Args: backbone_config ([`PretrainedConfig`]): The backbone configuration. Returns: [`Mask2FormerConfig`]: An instance of a configuration object """ return cls( backbone_config=backbone_config, **kwargs, ) __all__ = ["Mask2FormerConfig"]
transformers/src/transformers/models/mask2former/configuration_mask2former.py/0
{ "file_path": "transformers/src/transformers/models/mask2former/configuration_mask2former.py", "repo_id": "transformers", "token_count": 4790 }
119
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
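A hedged sketch of driving the converter above programmatically instead of through `argparse`; the module import assumes the script is importable from the working directory, and the checkpoint and output paths are placeholders for a locally available fairseq `model.pt`.

```python
from convert_mbart_original_checkpoint_to_pytorch import convert_fairseq_mbart_checkpoint_from_disk

# Placeholder path to a local fairseq mBART checkpoint
checkpoint_path = "checkpoints/mbart.cc25/model.pt"

# Convert the fairseq weights into a Hugging Face MBartForConditionalGeneration
model = convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path,
    hf_config_path="facebook/mbart-large-cc25",
    finetuned=False,
    mbart_50=False,
)

# Save in the Hugging Face format (config.json + weights)
model.save_pretrained("converted-mbart")
```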
transformers/src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 1117 }
120
# Copyright 2023 Mixtral AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _import_structure = { "configuration_mixtral": ["MixtralConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_mixtral"] = [ "MixtralForCausalLM", "MixtralForQuestionAnswering", "MixtralModel", "MixtralPreTrainedModel", "MixtralForSequenceClassification", "MixtralForTokenClassification", ] if TYPE_CHECKING: from .configuration_mixtral import MixtralConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mixtral import ( MixtralForCausalLM, MixtralForQuestionAnswering, MixtralForSequenceClassification, MixtralForTokenClassification, MixtralModel, MixtralPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
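The `_LazyModule` indirection above means the lightweight configuration can be imported without loading the torch-backed modeling code until a model class is actually requested. A minimal sketch, using deliberately tiny (illustrative) dimensions so the randomly initialised model stays small:

```python
from transformers import MixtralConfig, MixtralForCausalLM

# Tiny, illustrative configuration (real Mixtral checkpoints are far larger)
config = MixtralConfig(
    vocab_size=1000,
    hidden_size=128,
    intermediate_size=256,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_key_value_heads=2,
    num_local_experts=4,
    num_experts_per_tok=2,
)

# Instantiating the model triggers the lazy import of the modeling module
model = MixtralForCausalLM(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the toy model
```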
transformers/src/transformers/models/mixtral/__init__.py/0
{ "file_path": "transformers/src/transformers/models/mixtral/__init__.py", "repo_id": "transformers", "token_count": 711 }
121
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path): # Initialise PyTorch model config = MobileBertConfig.from_json_file(mobilebert_config_file) print(f"Building PyTorch model from configuration: {config}") model = MobileBertForPreTraining(config) # Load weights from tf checkpoint model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--mobilebert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained MobileBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
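Once the script above has been run, the dumped file is a plain PyTorch state dict rather than a `save_pretrained` folder. A hedged sketch of reloading it; the file paths are placeholders and assume the TF checkpoint has already been converted with the matching config JSON.

```python
import torch

from transformers import MobileBertConfig, MobileBertForPreTraining

# Rebuild the architecture from the same config JSON used during conversion (placeholder path)
config = MobileBertConfig.from_json_file("mobilebert/config.json")
model = MobileBertForPreTraining(config)

# Load the raw state dict that the script saved with torch.save(...)
state_dict = torch.load("mobilebert/pytorch_model.bin", map_location="cpu")
model.load_state_dict(state_dict)
model.eval()
```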
transformers/src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 753 }
122
# coding=utf-8 # Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MobileNetV2 model.""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_mobilenet_v2 import MobileNetV2Config logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "MobileNetV2Config" # Base docstring _CHECKPOINT_FOR_DOC = "google/mobilenet_v2_1.0_224" _EXPECTED_OUTPUT_SHAPE = [1, 1280, 7, 7] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v2_1.0_224" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" def _build_tf_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. """ tf_to_pt_map = {} if isinstance(model, (MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)): backbone = model.mobilenet_v2 else: backbone = model # Use the EMA weights if available def ema(x): return x + "/ExponentialMovingAverage" if x + "/ExponentialMovingAverage" in tf_weights else x prefix = "MobilenetV2/Conv/" tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_stem.first_conv.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.first_conv.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.first_conv.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.first_conv.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.first_conv.normalization.running_var prefix = "MobilenetV2/expanded_conv/depthwise/" tf_to_pt_map[ema(prefix + "depthwise_weights")] = backbone.conv_stem.conv_3x3.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.conv_3x3.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.conv_3x3.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.conv_3x3.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.conv_3x3.normalization.running_var prefix = "MobilenetV2/expanded_conv/project/" tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_stem.reduce_1x1.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.reduce_1x1.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.reduce_1x1.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.reduce_1x1.normalization.running_mean tf_to_pt_map[prefix + 
"BatchNorm/moving_variance"] = backbone.conv_stem.reduce_1x1.normalization.running_var for i in range(16): tf_index = i + 1 pt_index = i pointer = backbone.layer[pt_index] prefix = f"MobilenetV2/expanded_conv_{tf_index}/expand/" tf_to_pt_map[ema(prefix + "weights")] = pointer.expand_1x1.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.expand_1x1.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = pointer.expand_1x1.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.expand_1x1.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.expand_1x1.normalization.running_var prefix = f"MobilenetV2/expanded_conv_{tf_index}/depthwise/" tf_to_pt_map[ema(prefix + "depthwise_weights")] = pointer.conv_3x3.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.conv_3x3.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = pointer.conv_3x3.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.conv_3x3.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.conv_3x3.normalization.running_var prefix = f"MobilenetV2/expanded_conv_{tf_index}/project/" tf_to_pt_map[ema(prefix + "weights")] = pointer.reduce_1x1.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.reduce_1x1.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = pointer.reduce_1x1.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.reduce_1x1.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.reduce_1x1.normalization.running_var prefix = "MobilenetV2/Conv_1/" tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_1x1.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_1x1.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_1x1.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_1x1.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_1x1.normalization.running_var if isinstance(model, MobileNetV2ForImageClassification): prefix = "MobilenetV2/Logits/Conv2d_1c_1x1/" tf_to_pt_map[ema(prefix + "weights")] = model.classifier.weight tf_to_pt_map[ema(prefix + "biases")] = model.classifier.bias if isinstance(model, MobileNetV2ForSemanticSegmentation): prefix = "image_pooling/" tf_to_pt_map[prefix + "weights"] = model.segmentation_head.conv_pool.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_pool.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_pool.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = model.segmentation_head.conv_pool.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = ( model.segmentation_head.conv_pool.normalization.running_var ) prefix = "aspp0/" tf_to_pt_map[prefix + "weights"] = model.segmentation_head.conv_aspp.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_aspp.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_aspp.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = model.segmentation_head.conv_aspp.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = ( model.segmentation_head.conv_aspp.normalization.running_var ) prefix = "concat_projection/" tf_to_pt_map[prefix + 
"weights"] = model.segmentation_head.conv_projection.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_projection.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_projection.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = ( model.segmentation_head.conv_projection.normalization.running_mean ) tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = ( model.segmentation_head.conv_projection.normalization.running_var ) prefix = "logits/semantic/" tf_to_pt_map[ema(prefix + "weights")] = model.segmentation_head.classifier.convolution.weight tf_to_pt_map[ema(prefix + "biases")] = model.segmentation_head.classifier.convolution.bias return tf_to_pt_map def load_tf_weights_in_mobilenet_v2(model, config, tf_checkpoint_path): """Load TensorFlow checkpoints in a PyTorch model.""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise # Load weights from TF model init_vars = tf.train.list_variables(tf_checkpoint_path) tf_weights = {} for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_checkpoint_path, name) tf_weights[name] = array # Build TF to PyTorch weights loading map tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights) for name, pointer in tf_to_pt_map.items(): logger.info(f"Importing {name}") if name not in tf_weights: logger.info(f"{name} not in tf pre-trained weights, skipping") continue array = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise") array = np.transpose(array, (2, 3, 0, 1)) elif "weights" in name: logger.info("Transposing") if len(pointer.shape) == 2: # copying into linear layer array = array.squeeze().transpose() else: array = np.transpose(array, (3, 2, 0, 1)) if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") logger.info(f"Initialize PyTorch weight {name} {array.shape}") pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + "/RMSProp", None) tf_weights.pop(name + "/RMSProp_1", None) tf_weights.pop(name + "/ExponentialMovingAverage", None) tf_weights.pop(name + "/Momentum", None) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}") return model def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int: """ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the original TensorFlow repo. It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py """ if min_value is None: min_value = divisor new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_value < 0.9 * value: new_value += divisor return int(new_value) def apply_depth_multiplier(config: MobileNetV2Config, channels: int) -> int: return make_divisible(int(round(channels * config.depth_multiplier)), config.depth_divisible_by, config.min_depth) def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor: """ Apply TensorFlow-style "SAME" padding to a convolution layer. 
See the notes at: https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2 """ in_height = int(features.shape[-2]) in_width = int(features.shape[-1]) stride_height, stride_width = conv_layer.stride kernel_height, kernel_width = conv_layer.kernel_size dilation_height, dilation_width = conv_layer.dilation if in_height % stride_height == 0: pad_along_height = max(kernel_height - stride_height, 0) else: pad_along_height = max(kernel_height - (in_height % stride_height), 0) if in_width % stride_width == 0: pad_along_width = max(kernel_width - stride_width, 0) else: pad_along_width = max(kernel_width - (in_width % stride_width), 0) pad_left = pad_along_width // 2 pad_right = pad_along_width - pad_left pad_top = pad_along_height // 2 pad_bottom = pad_along_height - pad_top padding = ( pad_left * dilation_width, pad_right * dilation_width, pad_top * dilation_height, pad_bottom * dilation_height, ) return nn.functional.pad(features, padding, "constant", 0.0) class MobileNetV2ConvLayer(nn.Module): def __init__( self, config: MobileNetV2Config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, groups: int = 1, bias: bool = False, dilation: int = 1, use_normalization: bool = True, use_activation: Union[bool, str] = True, layer_norm_eps: Optional[float] = None, ) -> None: super().__init__() self.config = config if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") padding = 0 if config.tf_padding else int((kernel_size - 1) / 2) * dilation self.convolution = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode="zeros", ) if use_normalization: self.normalization = nn.BatchNorm2d( num_features=out_channels, eps=config.layer_norm_eps if layer_norm_eps is None else layer_norm_eps, momentum=0.997, affine=True, track_running_stats=True, ) else: self.normalization = None if use_activation: if isinstance(use_activation, str): self.activation = ACT2FN[use_activation] elif isinstance(config.hidden_act, str): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act else: self.activation = None def forward(self, features: torch.Tensor) -> torch.Tensor: if self.config.tf_padding: features = apply_tf_padding(features, self.convolution) features = self.convolution(features) if self.normalization is not None: features = self.normalization(features) if self.activation is not None: features = self.activation(features) return features class MobileNetV2InvertedResidual(nn.Module): def __init__( self, config: MobileNetV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1 ) -> None: super().__init__() expanded_channels = make_divisible( int(round(in_channels * config.expand_ratio)), config.depth_divisible_by, config.min_depth ) if stride not in [1, 2]: raise ValueError(f"Invalid stride {stride}.") self.use_residual = (stride == 1) and (in_channels == out_channels) self.expand_1x1 = MobileNetV2ConvLayer( config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1 ) self.conv_3x3 = MobileNetV2ConvLayer( config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=stride, groups=expanded_channels, dilation=dilation, ) self.reduce_1x1 = MobileNetV2ConvLayer( config, 
in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False, ) def forward(self, features: torch.Tensor) -> torch.Tensor: residual = features features = self.expand_1x1(features) features = self.conv_3x3(features) features = self.reduce_1x1(features) return residual + features if self.use_residual else features class MobileNetV2Stem(nn.Module): def __init__(self, config: MobileNetV2Config, in_channels: int, expanded_channels: int, out_channels: int) -> None: super().__init__() # The very first layer is a regular 3x3 convolution with stride 2 that expands to 32 channels. # All other expansion layers use the expansion factor to compute the number of output channels. self.first_conv = MobileNetV2ConvLayer( config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=3, stride=2, ) if config.first_layer_is_expansion: self.expand_1x1 = None else: self.expand_1x1 = MobileNetV2ConvLayer( config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=1 ) self.conv_3x3 = MobileNetV2ConvLayer( config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=1, groups=expanded_channels, ) self.reduce_1x1 = MobileNetV2ConvLayer( config, in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False, ) def forward(self, features: torch.Tensor) -> torch.Tensor: features = self.first_conv(features) if self.expand_1x1 is not None: features = self.expand_1x1(features) features = self.conv_3x3(features) features = self.reduce_1x1(features) return features class MobileNetV2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MobileNetV2Config load_tf_weights = load_tf_weights_in_mobilenet_v2 base_model_prefix = "mobilenet_v2" main_input_name = "pixel_values" supports_gradient_checkpointing = False _no_split_modules = [] def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.BatchNorm2d): module.bias.data.zero_() module.weight.data.fill_(1.0) MOBILENET_V2_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MOBILENET_V2_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV2ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare MobileNetV2 model outputting raw hidden-states without any specific head on top.", MOBILENET_V2_START_DOCSTRING, ) class MobileNetV2Model(MobileNetV2PreTrainedModel): def __init__(self, config: MobileNetV2Config, add_pooling_layer: bool = True): super().__init__(config) self.config = config # Output channels for the projection layers channels = [16, 24, 24, 32, 32, 32, 64, 64, 64, 64, 96, 96, 96, 160, 160, 160, 320] channels = [apply_depth_multiplier(config, x) for x in channels] # Strides for the depthwise layers strides = [2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1] self.conv_stem = MobileNetV2Stem( config, in_channels=config.num_channels, expanded_channels=apply_depth_multiplier(config, 32), out_channels=channels[0], ) current_stride = 2 # first conv layer has stride 2 dilation = 1 self.layer = nn.ModuleList() for i in range(16): # Keep making the feature maps smaller or use dilated convolution? if current_stride == config.output_stride: layer_stride = 1 layer_dilation = dilation dilation *= strides[i] # larger dilation starts in next block else: layer_stride = strides[i] layer_dilation = 1 current_stride *= layer_stride self.layer.append( MobileNetV2InvertedResidual( config, in_channels=channels[i], out_channels=channels[i + 1], stride=layer_stride, dilation=layer_dilation, ) ) if config.finegrained_output and config.depth_multiplier < 1.0: output_channels = 1280 else: output_channels = apply_depth_multiplier(config, 1280) self.conv_1x1 = MobileNetV2ConvLayer( config, in_channels=channels[-1], out_channels=output_channels, kernel_size=1, ) self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): raise NotImplementedError @add_start_docstrings_to_model_forward(MOBILENET_V2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.conv_stem(pixel_values) all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) last_hidden_state = self.conv_1x1(hidden_states) if self.pooler is not None: pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1) else: pooled_output = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states, ) @add_start_docstrings( """ MobileNetV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""", MOBILENET_V2_START_DOCSTRING, ) class MobileNetV2ForImageClassification(MobileNetV2PreTrainedModel): def __init__(self, config: MobileNetV2Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilenet_v2 = MobileNetV2Model(config) last_hidden_size = self.mobilenet_v2.conv_1x1.convolution.out_channels # Classifier head self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True) self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILENET_V2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilenet_v2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(self.dropout(pooled_output)) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) class MobileNetV2DeepLabV3Plus(nn.Module): """ The neural network from the paper "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" https://arxiv.org/abs/1802.02611 """ def __init__(self, config: MobileNetV2Config) -> None: super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d(output_size=1) self.conv_pool = MobileNetV2ConvLayer( config, in_channels=apply_depth_multiplier(config, 320), out_channels=256, kernel_size=1, stride=1, use_normalization=True, use_activation="relu", layer_norm_eps=1e-5, ) self.conv_aspp = MobileNetV2ConvLayer( config, in_channels=apply_depth_multiplier(config, 320), out_channels=256, kernel_size=1, stride=1, use_normalization=True, use_activation="relu", layer_norm_eps=1e-5, ) 
self.conv_projection = MobileNetV2ConvLayer( config, in_channels=512, out_channels=256, kernel_size=1, stride=1, use_normalization=True, use_activation="relu", layer_norm_eps=1e-5, ) self.dropout = nn.Dropout2d(config.classifier_dropout_prob) self.classifier = MobileNetV2ConvLayer( config, in_channels=256, out_channels=config.num_labels, kernel_size=1, use_normalization=False, use_activation=False, bias=True, ) def forward(self, features: torch.Tensor) -> torch.Tensor: spatial_size = features.shape[-2:] features_pool = self.avg_pool(features) features_pool = self.conv_pool(features_pool) features_pool = nn.functional.interpolate( features_pool, size=spatial_size, mode="bilinear", align_corners=True ) features_aspp = self.conv_aspp(features) features = torch.cat([features_pool, features_aspp], dim=1) features = self.conv_projection(features) features = self.dropout(features) features = self.classifier(features) return features @add_start_docstrings( """ MobileNetV2 model with a semantic segmentation head on top, e.g. for Pascal VOC. """, MOBILENET_V2_START_DOCSTRING, ) class MobileNetV2ForSemanticSegmentation(MobileNetV2PreTrainedModel): def __init__(self, config: MobileNetV2Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilenet_v2 = MobileNetV2Model(config, add_pooling_layer=False) self.segmentation_head = MobileNetV2DeepLabV3Plus(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILENET_V2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SemanticSegmenterOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> from transformers import AutoImageProcessor, MobileNetV2ForSemanticSegmentation >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513") >>> model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513") >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> # logits are of shape (batch_size, num_labels, height, width) >>> logits = outputs.logits ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and self.config.num_labels == 1: raise ValueError("The number of labels should be greater than one") outputs = self.mobilenet_v2( pixel_values, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] logits = self.segmentation_head(encoder_hidden_states[-1]) loss = None if labels is not None: # upsample logits to the images' original size upsampled_logits = nn.functional.interpolate( logits, size=labels.shape[-2:], mode="bilinear", align_corners=False ) loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) loss = loss_fct(upsampled_logits, labels) if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) __all__ = [ "MobileNetV2ForImageClassification", "MobileNetV2ForSemanticSegmentation", "MobileNetV2Model", "MobileNetV2PreTrainedModel", "load_tf_weights_in_mobilenet_v2", ]
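For the image-classification head defined above, inference follows the same pattern as the semantic-segmentation docstring example; the sketch below uses the `google/mobilenet_v2_1.0_224` checkpoint referenced in this file's docstrings and a sample COCO image.

```python
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, MobileNetV2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (batch_size, num_labels)

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])  # e.g. "tabby, tabby cat"
```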
transformers/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py/0
{ "file_path": "transformers/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py", "repo_id": "transformers", "token_count": 15170 }
123
# coding=utf-8 # Copyright 2023 HuggingFace Inc. team and MosaicML NLP team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mpt configuration""" from typing import TYPE_CHECKING, Optional, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class MptAttentionConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MptAttention`] class. It is used to instantiate attention layers according to the specified arguments, defining the layers architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MPT [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b) architecture. Most of the arguments are kept for backward compatibility with previous MPT models that are hosted on the Hub (previously with `trust_remote_code=True`). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: attn_type (`str`, *optional*, defaults to `"multihead_attention"`): type of attention to use. Options: `"multihead_attention"`, `"multiquery_attention"`. attn_pdrop (`float`, *optional*, defaults to `0.0`): The dropout probability for the attention layers. attn_impl (`str`, *optional*, defaults to `"torch"`): The attention implementation to use. One of `"torch"`, `"flash"`, or `"triton"`. clip_qkv (`float`, *optional*): If not `None`, clip the queries, keys, and values in the attention layer to this value. softmax_scale (`float`, *optional*): If not `None`, scale the softmax in the attention layer by this value. If `None`, will default to `1/sqrt(hidden_size)`. prefix_lm (`bool`, *optional*, defaults to `False`): Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another bi-directionally. Tokens outside the prefix use causal attention. qk_ln (`bool`, *optional*, defaults to `False`): Whether to apply layer normalization to the queries and keys in the attention layer. attn_uses_sequence_id (`bool`, *optional*, defaults to `False`): Whether to restrict attention to tokens that have the same token_type_ids. When the model is in `train` mode, this requires passing an extra *token_type_ids* argument which indicates which sub-sequence each token belongs to. Defaults to `False` meaning any provided *token_type_ids* will be ignored. alibi (`bool`, *optional*, defaults to `True`): Whether or not to use the alibi bias instead of positional embedding. alibi_bias_max (`int`, *optional*, defaults to 8): The maximum value of the alibi bias. 
""" base_config_key = "attn_config" def __init__( self, attn_type="multihead_attention", attn_pdrop=0, attn_impl="torch", clip_qkv=None, softmax_scale=None, prefix_lm=False, qk_ln=False, attn_uses_sequence_id=False, alibi=True, alibi_bias_max=8, **kwargs, ): super().__init__() self.attn_type = attn_type self.attn_pdrop = attn_pdrop self.attn_impl = attn_impl self.clip_qkv = clip_qkv self.softmax_scale = softmax_scale self.prefix_lm = prefix_lm self.attn_uses_sequence_id = attn_uses_sequence_id self.alibi = alibi self.qk_ln = qk_ln self.alibi_bias_max = alibi_bias_max if attn_type not in ["multihead_attention", "multiquery_attention"]: raise ValueError( f"`attn_type` has to be either `multihead_attention` or `multiquery_attention`. Received: {attn_type}" ) class MptConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MptModel`]. It is used to instantiate a Mpt model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to the Mpt-7b architecture [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: d_model (`int`, *optional*, defaults to 2048): Dimensionality of the embeddings and hidden states. n_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. n_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. expansion_ratio (`int`, *optional*, defaults to 4): The ratio of the up/down scale in the MLP. max_seq_len (`int`, *optional*, defaults to 2048): The maximum sequence length of the model. vocab_size (`int`, *optional*, defaults to 50368): Vocabulary size of the Mpt model. Defines the maximum number of different tokens that can be represented by the `inputs_ids` passed when calling [`MptModel`]. Check [this discussion](https://huggingface.co/bigscience/mpt/discussions/120#633d28389addb8530b406c2a) on how the `vocab_size` has been defined. resid_pdrop (`float`, *optional*, defaults to 0.0): The dropout probability applied to the attention output before combining with residual. layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): The epsilon to use in the layer normalization layers. emb_pdrop (`float`, *optional*, defaults to 0.0): The dropout probability for the embedding layer. learned_pos_emb (`bool`, *optional*, defaults to `True`): Whether to use learned positional embeddings. attn_config (`dict`, *optional*): A dictionary used to configure the model's attention module. init_device (`str`, *optional*, defaults to `"cpu"`): The device to use for parameter initialization. Defined for backward compatibility logit_scale (`float`, *optional*): If not None, scale the logits by this value. no_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in all linear layers. verbose (`int`, *optional*, defaults to 0): The verbosity level to use for logging. Used in the previous versions of MPT models for logging. This argument is deprecated. embedding_fraction (`float`, *optional*, defaults to 1.0): The fraction to scale the gradients of the embedding layer by. norm_type (`str`, *optional*, defaults to `"low_precision_layernorm"`): Type of layer norm to use. All MPT models uses the same layer norm implementation. Defined for backward compatibility. 
use_cache (`bool`, *optional*, defaults to `False`): Whether or not the model should return the last key/values attentions (not used by all models). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Example: ```python >>> from transformers import MptConfig, MptModel >>> # Initializing a Mpt configuration >>> configuration = MptConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = MptModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "mpt" sub_configs = {"attn_config": MptAttentionConfig} attribute_map = { "num_attention_heads": "n_heads", "hidden_size": "d_model", "num_hidden_layers": "n_layers", } def __init__( self, d_model: int = 2048, n_heads: int = 16, n_layers: int = 24, expansion_ratio: int = 4, max_seq_len: int = 2048, vocab_size: int = 50368, resid_pdrop: float = 0.0, layer_norm_epsilon: float = 1e-5, emb_pdrop: float = 0.0, learned_pos_emb: bool = True, attn_config: MptAttentionConfig = None, init_device: str = "cpu", logit_scale: Optional[Union[float, str]] = None, no_bias: bool = True, verbose: int = 0, embedding_fraction: float = 1.0, norm_type: str = "low_precision_layernorm", use_cache: bool = False, initializer_range=0.02, **kwargs, ): if attn_config is None: self.attn_config = MptAttentionConfig() elif isinstance(attn_config, dict): self.attn_config = MptAttentionConfig(**attn_config) else: self.attn_config = attn_config self.d_model = d_model self.n_heads = n_heads self.n_layers = n_layers self.expansion_ratio = expansion_ratio self.max_seq_len = max_seq_len self.vocab_size = vocab_size self.resid_pdrop = resid_pdrop self.emb_pdrop = emb_pdrop self.learned_pos_emb = learned_pos_emb self.init_device = init_device self.logit_scale = logit_scale self.no_bias = no_bias self.verbose = verbose self.embedding_fraction = embedding_fraction self.norm_type = norm_type self.layer_norm_epsilon = layer_norm_epsilon self.use_cache = use_cache self.initializer_range = initializer_range super().__init__(**kwargs) __all__ = ["MptConfig"]
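To illustrate the `attn_config` handling in `MptConfig.__init__` above, a small sketch that nests an explicit `MptAttentionConfig` inside the top-level configuration; the hyperparameter values are illustrative only.

```python
from transformers import MptConfig
from transformers.models.mpt.configuration_mpt import MptAttentionConfig

# Build the attention sub-config explicitly (illustrative values)
attn_config = MptAttentionConfig(attn_type="multiquery_attention", attn_pdrop=0.1, alibi=True)

# Nest it inside the top-level model configuration
config = MptConfig(d_model=1024, n_heads=8, n_layers=12, attn_config=attn_config)
print(config.attn_config.attn_type)  # "multiquery_attention"
```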
transformers/src/transformers/models/mpt/configuration_mpt.py/0
{ "file_path": "transformers/src/transformers/models/mpt/configuration_mpt.py", "repo_id": "transformers", "token_count": 4106 }
124
# coding=utf-8 # Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Musicgen model.""" import copy import inspect import math import random from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...generation import ( ClassifierFreeGuidanceLogitsProcessor, GenerationConfig, GenerationMixin, GenerationMode, LogitsProcessorList, StoppingCriteriaList, ) from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, ) from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, ModelOutput, Seq2SeqLMOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings, ) from ..auto.configuration_auto import AutoConfig from ..auto.modeling_auto import AutoModel from .configuration_musicgen import MusicgenConfig, MusicgenDecoderConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward if TYPE_CHECKING: from ...generation.streamers import BaseStreamer logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "MusicgenConfig" _CHECKPOINT_FOR_DOC = "facebook/musicgen-small" @dataclass class MusicgenUnconditionalInput(ModelOutput): """ Args: encoder_outputs (`Tuple[torch.FloatTensor]` of length 1, with tensor shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the text encoder model. attention_mask (`torch.LongTensor`) of shape `(batch_size, sequence_length)`, *optional*): Encoder attention mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**. guidance_scale (`float`, *optional*): Guidance scale for classifier free guidance, setting the balance between the conditional logits (predicted from the prompts) and the unconditional logits (predicted without prompts). """ encoder_outputs: Tuple[torch.FloatTensor] = None attention_mask: torch.LongTensor = None guidance_scale: float = None def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. 
""" # transpose to get (bsz, num_codebooks, seq_len) input_ids = input_ids.transpose(1, 2) shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() if decoder_start_token_id is None: raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.") shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class MusicgenSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int): super().__init__() self.embedding_dim = embedding_dim self.make_weights(num_positions, embedding_dim) def make_weights(self, num_embeddings: int, embedding_dim: int): emb_weights = self.get_embedding(num_embeddings, embedding_dim) if hasattr(self, "weights"): # in forward put the weights on the correct dtype and device of the param emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.weights = nn.Parameter(emb_weights) self.weights.requires_grad = False self.weights.detach_() @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.cos(emb), torch.sin(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): bsz, codebooks, seq_len = input_ids.size() # Create the position ids from the input token ids. position_ids = (torch.arange(seq_len) + past_key_values_length).to(input_ids.device) # expand embeddings if needed if seq_len > self.weights.size(0): self.make_weights(seq_len + self.offset, self.embedding_dim) return self.weights.index_select(0, position_ids.view(-1)).detach() # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Musicgen class MusicgenAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[MusicgenConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value # Copied from transformers.models.bart.modeling_bart.BartFlashAttention2 with Bart->Musicgen class MusicgenFlashAttention2(MusicgenAttention): """ Musicgen flash attention module. This module inherits from `MusicgenAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. 
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # MusicgenFlashAttention2 attention does not support output_attentions if output_attentions: raise ValueError("MusicgenFlashAttention2 attention does not support output_attentions") # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, q_len, _ = hidden_states.size() # get query proj query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0].transpose(1, 2) value_states = past_key_value[1].transpose(1, 2) elif is_cross_attention: # cross_attentions key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) else: # self_attention key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states get silently cast to float32.
Hence, we need to # cast them back to the correct dtype just to be sure everything works as expected. # This might slow down training & inference so it is recommended not to cast the LayerNorms # in fp32. (LlamaRMSNorm handles it correctly) input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seem to be silently cast to float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input to" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout if self.training else 0.0, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value class MusicgenSdpaAttention(MusicgenAttention): def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" if output_attentions or layer_head_mask is not None: # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented. logger.warning_once( "MusicgenModel is using MusicgenSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention" ' implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states, key_value_states=key_value_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) if ( attention_mask is not None and (attention_mask.mean(dim=[1, 2, 3]) <= torch.finfo(attention_mask.dtype).min).any() ): logger.warning_once( '`torch.nn.functional.scaled_dot_product_attention` does not support having an empty attention mask. Falling back to the manual attention implementation. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' " Note that this probably happens because `guidance_scale>1` or because you used `get_unconditional_inputs`. See https://github.com/huggingface/transformers/issues/31189 for more information."
) return super().forward( hidden_states, key_value_states=key_value_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) query_states = self._shape(query_states, tgt_len, bsz) # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case tgt_len == 1. is_causal = True if self.is_causal and attention_mask is None and tgt_len > 1 else False # NOTE: SDPA with memory-efficient backend is currently (torch==2.1.2) bugged when using non-contiguous inputs and a custom attn_mask, # but we are fine here as `_shape` do call `.contiguous()`. 
Reference: https://github.com/pytorch/pytorch/issues/112577 attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.dropout if self.training else 0.0, is_causal=is_causal, ) if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, None, past_key_value MUSICGEN_ATTENTION_CLASSES = { "eager": MusicgenAttention, "sdpa": MusicgenSdpaAttention, "flash_attention_2": MusicgenFlashAttention2, } class MusicgenDecoderLayer(nn.Module): def __init__(self, config: MusicgenDecoderConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = MUSICGEN_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=False, is_causal=True, config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = MUSICGEN_ATTENTION_CLASSES[config._attn_implementation]( self.embed_dim, config.num_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=False, config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=False) self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=False) self.final_layer_norm = nn.LayerNorm(self.embed_dim) # Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class MusicgenPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MusicgenDecoderConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["MusicgenDecoderLayer", "MusicgenAttention"] _supports_flash_attn_2 = True _supports_sdpa = True def _init_weights(self, module): std = self.config.initializer_factor if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() MUSICGEN_START_DOCSTRING = r""" The Musicgen model was proposed in [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi, Alexandre Défossez. It is an encoder decoder transformer trained on the task of conditional music generation This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.). This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config ([`MusicgenConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MUSICGEN_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) <Tip warning={true}> The `decoder_input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `decoder_input_ids` (a short illustration is given at the end of this docstring). </Tip> decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder.
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
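        As an illustration of the reshape described for `decoder_input_ids` above, the following is a minimal
        sketch rather than part of the library API; the `audio_encoder`, `input_values` and `padding_mask`
        variables are assumed to be defined, e.g. from an [`EncodecModel`] and its feature extractor:

        ```python
        # encode an audio prompt into discrete codes with a compression model such as EncodecModel
        audio_encoder_outputs = audio_encoder.encode(input_values, padding_mask=padding_mask)

        # audio_codes is assumed to have shape (frames, batch_size, num_codebooks, target_sequence_length);
        # the delay pattern mask requires a single frame, i.e. chunking disabled in the audio encoder
        audio_codes = audio_encoder_outputs.audio_codes
        frames, bsz, codebooks, seq_len = audio_codes.shape

        # flatten the codebook dimension into the batch dimension, as expected by the decoder
        decoder_input_ids = audio_codes[0, ...].reshape(bsz * codebooks, seq_len)
        ```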
""" MUSICGEN_DECODER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class MusicgenDecoder(MusicgenPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MusicgenDecoderLayer`] """ def __init__(self, config: MusicgenDecoderConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.max_target_positions = config.max_position_embeddings self.d_model = config.hidden_size self.num_codebooks = config.num_codebooks self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 embed_dim = config.vocab_size + 1 self.embed_tokens = nn.ModuleList( [nn.Embedding(embed_dim, config.hidden_size) for _ in range(config.num_codebooks)] ) self.embed_positions = MusicgenSinusoidalPositionalEmbedding( config.max_position_embeddings, config.hidden_size, ) self.layers = nn.ModuleList([MusicgenDecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.layer_norm = nn.LayerNorm(config.hidden_size) self.attn_implementation = config._attn_implementation self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(MUSICGEN_DECODER_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: # (bsz * codebooks, seq_len) -> (bsz, codebooks, seq_len) input = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1]) bsz, num_codebooks, seq_len = input.shape 
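# NOTE: MusicGen carries the `num_codebooks` parallel code streams flattened into the batch dimension of `input_ids`; # after the reshape above, each codebook stream is embedded with its own embedding table further below and the # per-codebook embeddings are summed into a single hidden-state sequence of shape (bsz, seq_len, hidden_size).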
input_shape = (bsz, seq_len) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1:] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = sum([self.embed_tokens[codebook](input[:, codebook]) for codebook in range(num_codebooks)]) if self.attn_implementation == "flash_attention_2": attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif self.attn_implementation == "sdpa" and head_mask is None and not output_attentions: # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, input_shape, inputs_embeds, past_key_values_length, ) else: attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: if self.attn_implementation == "flash_attention_2": encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None elif self.attn_implementation == "sdpa" and cross_attn_head_mask is None and not output_attentions: # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1], ) else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # embed positions positions = self.embed_positions(input, past_key_values_length) hidden_states = inputs_embeds + positions.to(inputs_embeds.device) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {attn_mask.size()[0]}." 
) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.forward, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare Musicgen decoder model outputting raw hidden-states without any specific head on top.", MUSICGEN_START_DOCSTRING, ) class MusicgenModel(MusicgenPreTrainedModel): def __init__(self, config: MusicgenDecoderConfig): super().__init__(config) self.decoder = MusicgenDecoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(MUSICGEN_DECODER_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if 
use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, ) @add_start_docstrings( "The MusicGen decoder model with a language modelling head on top.", MUSICGEN_START_DOCSTRING, ) class MusicgenForCausalLM(MusicgenPreTrainedModel, GenerationMixin): def __init__(self, config: MusicgenDecoderConfig): super().__init__(config) self.model = MusicgenModel(config) self.num_codebooks = config.num_codebooks self.lm_heads = nn.ModuleList( [nn.Linear(config.hidden_size, config.vocab_size, bias=False) for _ in range(config.num_codebooks)] ) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_heads def set_output_embeddings(self, new_embeddings): self.lm_heads = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @add_start_docstrings_to_model_forward(MUSICGEN_DECODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (labels is not None) and (input_ids is None and inputs_embeds is None): input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.bos_token_id) outputs = self.model( input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] lm_logits = torch.stack([head(hidden_states) for head in self.lm_heads], dim=1) loss = None if labels is not None: # since encoder hidden states have been concatenated to the decoder hidden states, # we take the last timestamps corresponding to labels logits = lm_logits[:, :, -labels.shape[1] :] loss_fct = CrossEntropyLoss() loss = torch.zeros([], device=self.device) # per codebook cross-entropy # -100 labels are ignored labels = labels.masked_fill(labels == self.config.pad_token_id, -100) # per codebook cross-entropy # ref: https://github.com/facebookresearch/audiocraft/blob/69fea8b290ad1b4b40d28f92d1dfc0ab01dbab85/audiocraft/solvers/musicgen.py#L242-L243 for codebook in range(self.config.num_codebooks): codebook_logits = logits[:, codebook].contiguous().view(-1, logits.shape[-1]) codebook_labels = labels[..., codebook].contiguous().view(-1) loss += loss_fct(codebook_logits, codebook_labels) loss = loss / self.config.num_codebooks # (bsz, num_codebooks, seq_len, vocab_size) -> (bsz * num_codebooks, seq_len, vocab_size) lm_logits = lm_logits.reshape(-1, *lm_logits.shape[2:]) if not return_dict: output = (lm_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=True, delay_pattern_mask=None, guidance_scale=None, **kwargs, ): # Overwritten -- MusicGen has custom processing if delay_pattern_mask is None: input_ids, delay_pattern_mask = self.build_delay_pattern_mask( input_ids, pad_token_id=self.generation_config.pad_token_id, max_length=self.generation_config.max_length, ) # apply the delay pattern mask input_ids = self.apply_delay_pattern_mask(input_ids, delay_pattern_mask) if guidance_scale is not None and guidance_scale > 1: # for classifier free guidance we need to replicate the decoder args across the batch dim (we'll split these # before sampling) input_ids = input_ids.repeat((2, 1)) if attention_mask is not None: attention_mask = attention_mask.repeat((2, 1)) if past_key_values is not None: input_ids = input_ids[:, -1:] return { "input_ids": input_ids, "attention_mask": attention_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, "head_mask": head_mask, "cross_attn_head_mask": 
cross_attn_head_mask, "past_key_values": past_key_values, "use_cache": use_cache, } def build_delay_pattern_mask(self, input_ids: torch.LongTensor, pad_token_id: int, max_length: int = None): """Build a delay pattern mask for the input_ids. Each codebook is offset from the previous codebook by one, giving a delay pattern mask at the start and end of the sequence. Take the example where there are 4 codebooks and a max sequence length of 8: we have the delay pattern mask of shape `(codebooks, seq_len)`: - [P, -1, -1, -1, -1, P, P, P] - [P, P, -1, -1, -1, -1, P, P] - [P, P, P, -1, -1, -1, -1, P] - [P, P, P, P, -1, -1, -1, -1] where P is the special padding token id and -1 indicates that the token is valid for prediction. If we include a prompt (decoder input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the mask is set to the value in the prompt: - [P, a, b, -1, -1, P, P, P] - [P, P, c, d, -1, -1, P, P] - [P, P, P, e, f, -1, -1, P] - [P, P, P, P, g, h, -1, -1] where a-h indicate the input prompt (decoder input ids) that are offset by 1. Now, we only override the -1 tokens in our prediction. """ # (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len) input_ids = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1]) bsz, num_codebooks, seq_len = input_ids.shape max_length = max_length if max_length is not None else self.generation_config.max_length input_ids_shifted = ( torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1 ) channel_codebooks = num_codebooks // 2 if self.config.audio_channels == 2 else num_codebooks # we only apply the mask if we have a large enough seq len - otherwise we return as is if max_length < 2 * channel_codebooks - 1: return input_ids.reshape(bsz * num_codebooks, -1), input_ids_shifted.reshape(bsz * num_codebooks, -1) # fill the shifted ids with the prompt entries, offset by the codebook idx for codebook in range(channel_codebooks): if self.config.audio_channels == 1: # mono channel - loop over the codebooks one-by-one input_ids_shifted[:, codebook, codebook : seq_len + codebook] = input_ids[:, codebook] else: # left/right channels are interleaved in the generated codebooks, so handle one then the other input_ids_shifted[:, 2 * codebook, codebook : seq_len + codebook] = input_ids[:, 2 * codebook] input_ids_shifted[:, 2 * codebook + 1, codebook : seq_len + codebook] = input_ids[:, 2 * codebook + 1] # construct a pattern mask that indicates the positions of padding tokens for each codebook # first fill the upper triangular part (the EOS padding) delay_pattern = torch.triu( torch.ones((channel_codebooks, max_length), dtype=torch.bool), diagonal=max_length - channel_codebooks + 1 ) # then fill the lower triangular part (the BOS padding) delay_pattern = delay_pattern + torch.tril(torch.ones((channel_codebooks, max_length), dtype=torch.bool)) if self.config.audio_channels == 2: # for left/right channel we need to duplicate every row of the pattern mask in an interleaved fashion delay_pattern = delay_pattern.repeat_interleave(2, dim=0) mask = ~delay_pattern.to(input_ids.device) input_ids = mask * input_ids_shifted + ~mask * pad_token_id # find the first position to start generating - this is the first place we have the -1 token # and will always be in the first codebook (since it has no codebook offset) first_codebook_ids = input_ids[:, 0, :] start_ids = (first_codebook_ids == -1).nonzero()[:, 1] if len(start_ids) > 0: first_start_id = min(start_ids) else: # we have no
tokens that need to be filled - return entire matrix of input ids first_start_id = seq_len # (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len) pattern_mask = input_ids.reshape(bsz * num_codebooks, -1) input_ids = input_ids[..., :first_start_id].reshape(bsz * num_codebooks, -1) return input_ids, pattern_mask @staticmethod def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask): """Apply a delay pattern mask to the decoder input ids, only preserving predictions where the mask is set to -1, and otherwise setting to the value detailed in the mask.""" seq_len = input_ids.shape[-1] decoder_pad_token_mask = decoder_pad_token_mask[..., :seq_len] input_ids = torch.where(decoder_pad_token_mask == -1, input_ids, decoder_pad_token_mask) return input_ids @torch.no_grad() def generate( self, inputs: Optional[torch.Tensor] = None, generation_config: Optional[GenerationConfig] = None, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, synced_gpus: Optional[bool] = None, streamer: Optional["BaseStreamer"] = None, **kwargs, ): """ Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a generation config. If a stopping criteria is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. 
synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed to avoid deadlocking with `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3). streamer (`BaseStreamer`, *optional*): Streamer object that will be used to stream the generated sequences. Generated tokens are passed through `streamer.put(token_ids)` and the streamer is responsible for any further processing. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder-specific kwargs should not be prefixed and decoder-specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateDecoderOnlyOutput`], - [`~generation.GenerateBeamDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs generation_config.validate() self._validate_model_kwargs(model_kwargs.copy()) # 2. Set generation parameters if not already defined logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() requires_attention_mask = "encoder_outputs" not in model_kwargs kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None # 3. Define model inputs input_ids, model_input_name, model_kwargs = self._prepare_model_inputs( inputs, generation_config.bos_token_id, model_kwargs ) batch_size = input_ids.shape[0] // self.num_codebooks self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=input_ids.device) # 4. Define other model kwargs model_kwargs["use_cache"] = generation_config.use_cache model_kwargs["guidance_scale"] = generation_config.guidance_scale if model_kwargs.get("attention_mask", None) is None and requires_attention_mask: model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( input_ids, generation_config, model_kwargs ) # 5. Prepare `max_length` depending on other stopping criteria. input_ids_length = input_ids.shape[-1] has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None generation_config = self._prepare_generated_length( generation_config=generation_config, has_default_max_length=has_default_max_length, has_default_min_length=has_default_min_length, model_input_name=model_input_name, inputs_tensor=input_ids, input_ids_length=input_ids_length, ) # 6.
Prepare `input_ids` which will be used for auto-regressive generation # Build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to MusicGen) input_ids, delay_pattern_mask = self.build_delay_pattern_mask( input_ids, pad_token_id=generation_config._decoder_start_token_tensor, max_length=generation_config.max_length, ) if streamer is not None: streamer.put(input_ids.cpu()) # stash the delay mask so that we don't have to recompute it in each forward pass model_kwargs["delay_pattern_mask"] = delay_pattern_mask # 7. determine generation mode generation_mode = generation_config.get_generation_mode() # 8. prepare batched CFG externally (to enable coexistence with the unbatched CFG) if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1: logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)) generation_config.guidance_scale = None # 9. prepare distribution pre-processing samplers logits_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids_length, encoder_input_ids=input_ids, prefix_allowed_tokens_fn=None, logits_processor=logits_processor, device=input_ids.device, ) # 10. prepare stopping criteria stopping_criteria = self._get_stopping_criteria( generation_config=generation_config, stopping_criteria=stopping_criteria ) if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH): # expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_return_sequences, **model_kwargs, ) # 11. run sample outputs = self._sample( input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, generation_config=generation_config, synced_gpus=synced_gpus, streamer=streamer, **model_kwargs, ) else: raise ValueError( "Got incompatible mode for generation, should be one of greedy or sampling. " "Ensure that beam search is deactivated by setting `num_beams=1` and `num_beam_groups=1`."
) if generation_config.return_dict_in_generate: output_ids = outputs.sequences else: output_ids = outputs # apply the pattern mask to the final ids output_ids = self.apply_delay_pattern_mask(output_ids, model_kwargs["delay_pattern_mask"]) # revert the pattern delay mask by filtering the pad token id output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape( batch_size, self.num_codebooks, -1 ) if generation_config.return_dict_in_generate: outputs.sequences = output_ids return outputs else: return output_ids @add_start_docstrings( "The composite MusicGen model with a text encoder, audio encoder and Musicgen decoder, " "for music generation tasks with one or both of text and audio prompts.", MUSICGEN_START_DOCSTRING, ) class MusicgenForConditionalGeneration(PreTrainedModel, GenerationMixin): config_class = MusicgenConfig base_model_prefix = "encoder_decoder" main_input_name = "input_ids" supports_gradient_checkpointing = True _supports_flash_attn_2 = True _supports_sdpa = True def __init__( self, config: Optional[MusicgenConfig] = None, text_encoder: Optional[PreTrainedModel] = None, audio_encoder: Optional[PreTrainedModel] = None, decoder: Optional[MusicgenForCausalLM] = None, ): if config is None and (text_encoder is None or audio_encoder is None or decoder is None): raise ValueError( "Either a configuration has to be provided, or all three of text encoder, audio encoder and MusicGen decoder." ) if config is None: config = MusicgenConfig.from_sub_models_config(text_encoder.config, audio_encoder.config, decoder.config) else: if not isinstance(config, self.config_class): raise ValueError(f"Config: {config} has to be of type {self.config_class}") if config.decoder.cross_attention_hidden_size is not None: if config.decoder.cross_attention_hidden_size != config.text_encoder.hidden_size: raise ValueError( "If `cross_attention_hidden_size` is specified in the MusicGen decoder's configuration, it has to be equal" f" to the text encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for" f" `config.decoder.cross_attention_hidden_size` and {config.text_encoder.hidden_size} for" " `config.text_encoder.hidden_size`." 
) # initialize with config super().__init__(config) if text_encoder is None: from ..auto.modeling_auto import AutoModelForTextEncoding text_encoder = AutoModelForTextEncoding.from_config(config.text_encoder) if audio_encoder is None: from ..auto.modeling_auto import AutoModel audio_encoder = AutoModel.from_config(config.audio_encoder) if decoder is None: decoder = MusicgenForCausalLM._from_config(config.decoder) self.text_encoder = text_encoder self.audio_encoder = audio_encoder self.decoder = decoder if self.text_encoder.config.to_dict() != self.config.text_encoder.to_dict(): logger.warning( f"Config of the text_encoder: {self.text_encoder.__class__} is overwritten by shared text_encoder config:" f" {self.config.text_encoder}" ) if self.audio_encoder.config.to_dict() != self.config.audio_encoder.to_dict(): logger.warning( f"Config of the audio_encoder: {self.audio_encoder.__class__} is overwritten by shared audio_encoder config:" f" {self.config.audio_encoder}" ) if self.decoder.config.to_dict() != self.config.decoder.to_dict(): logger.warning( f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:" f" {self.config.decoder}" ) # make sure that the individual model's config refers to the shared config # so that the updates to the config will be synced self.config.text_encoder._attn_implementation = self.text_encoder.config._attn_implementation self.config.audio_encoder._attn_implementation = self.audio_encoder.config._attn_implementation self.config.decoder._attn_implementation = self.decoder.config._attn_implementation self.text_encoder.config = self.config.text_encoder self.audio_encoder.config = self.config.audio_encoder self.decoder.config = self.config.decoder # text encoder outputs might need to be projected to different dimension for decoder if ( self.text_encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None ): self.enc_to_dec_proj = nn.Linear(self.text_encoder.config.hidden_size, self.decoder.config.hidden_size) if self.text_encoder.get_output_embeddings() is not None: raise ValueError( f"The encoder {self.text_encoder} should not have a LM Head. Please use a model without and LM Head" ) decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys()) if "encoder_hidden_states" not in decoder_signature: raise ValueError( "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the " "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350" ) # tie text encoder, decoder weights if config set accordingly self.tie_weights() def tie_weights(self): # tie text encoder & decoder if needed if self.config.tie_encoder_decoder: # tie text encoder and decoder base model decoder_base_model_prefix = self.decoder.base_model_prefix tied_weights = self._tie_encoder_decoder_weights( self.text_encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix, "text_encoder", ) # Setting a dynamic variable instead of `_tied_weights_keys` because it's a class # attributed not an instance member, therefore modifying it will modify the entire class # Leading to issues on subsequent calls by different tests or subsequent calls. 
self._dynamic_tied_weights_keys = tied_weights def get_audio_encoder(self): return self.audio_encoder def get_text_encoder(self): return self.text_encoder def get_encoder(self): # get the text encoder to compute the encoder hidden-states for generation return self.get_text_encoder() def get_decoder(self): return self.decoder def get_input_embeddings(self): return self.text_encoder.get_input_embeddings() def get_output_embeddings(self): return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): return self.decoder.set_output_embeddings(new_embeddings) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Example: ```python >>> from transformers import MusicgenForConditionalGeneration >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") ```""" # At the moment fast initialization is not supported for composite models if kwargs.get("_fast_init", False): logger.warning( "Fast initialization is currently not supported for MusicgenForConditionalGeneration. " "Falling back to slow initialization..." ) kwargs["_fast_init"] = False return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) @classmethod def from_sub_models_pretrained( cls, text_encoder_pretrained_model_name_or_path: str = None, audio_encoder_pretrained_model_name_or_path: str = None, decoder_pretrained_model_name_or_path: str = None, *model_args, **kwargs, ) -> PreTrainedModel: r""" Instantiate a text encoder, an audio encoder, and a MusicGen decoder from one, two or three base classes of the library from pretrained model checkpoints. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `model.train()`. Params: text_encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the text encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. audio_encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the audio encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the decoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the text encoder configuration, use the prefix *text_encoder_* for each configuration parameter. - To update the audio encoder configuration, use the prefix *audio_encoder_* for each configuration parameter. 
- To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import MusicgenForConditionalGeneration >>> # initialize a musicgen model from a t5 text encoder, encodec audio encoder, and musicgen decoder >>> model = MusicgenForConditionalGeneration.from_sub_models_pretrained( ... text_encoder_pretrained_model_name_or_path="google-t5/t5-base", ... audio_encoder_pretrained_model_name_or_path="facebook/encodec_24khz", ... decoder_pretrained_model_name_or_path="facebook/musicgen-small", ... ) >>> # saving model after fine-tuning >>> model.save_pretrained("./musicgen-ft") >>> # load fine-tuned model >>> model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-ft") ```""" kwargs_text_encoder = { argument[len("text_encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("text_encoder_") } kwargs_audio_encoder = { argument[len("audio_encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("audio_encoder_") } kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } # remove text encoder, audio encoder and decoder kwargs from kwargs for key in kwargs_text_encoder.keys(): del kwargs["text_encoder_" + key] for key in kwargs_audio_encoder.keys(): del kwargs["audio_encoder_" + key] for key in kwargs_decoder.keys(): del kwargs["decoder_" + key] # Load and initialize the encoder and decoder # The distinction between encoder and decoder at the model level is made # by the value of the flag `is_decoder` that we need to set correctly. text_encoder = kwargs_text_encoder.pop("model", None) if text_encoder is None: if text_encoder_pretrained_model_name_or_path is None: raise ValueError( "If `text_encoder_model` is not defined as an argument, a `text_encoder_pretrained_model_name_or_path` has " "to be defined." ) if "config" not in kwargs_text_encoder: encoder_config, kwargs_text_encoder = AutoConfig.from_pretrained( text_encoder_pretrained_model_name_or_path, **kwargs_text_encoder, return_unused_kwargs=True ) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info( f"Initializing {text_encoder_pretrained_model_name_or_path} as a text_encoder model " "from a decoder model. Cross-attention and casual mask are disabled." ) encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_text_encoder["config"] = encoder_config text_encoder = AutoModel.from_pretrained( text_encoder_pretrained_model_name_or_path, *model_args, **kwargs_text_encoder ) audio_encoder = kwargs_audio_encoder.pop("model", None) if audio_encoder is None: if audio_encoder_pretrained_model_name_or_path is None: raise ValueError( "If `audio_encoder_model` is not defined as an argument, an `audio_encoder_pretrained_model_name_or_path` has " "to be defined." ) if "config" not in kwargs_audio_encoder: encoder_config, kwargs_audio_encoder = AutoConfig.from_pretrained( audio_encoder_pretrained_model_name_or_path, **kwargs_audio_encoder, return_unused_kwargs=True ) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info( f"Initializing {audio_encoder_pretrained_model_name_or_path} as an audio_encoder model " "from a decoder model. 
Cross-attention and casual mask are disabled." ) encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_audio_encoder["config"] = encoder_config audio_encoder = AutoModel.from_pretrained( audio_encoder_pretrained_model_name_or_path, *model_args, **kwargs_audio_encoder ) decoder = kwargs_decoder.pop("model", None) if decoder is None: if decoder_pretrained_model_name_or_path is None: raise ValueError( "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has " "to be defined." ) if "config" not in kwargs_decoder: decoder_config, kwargs_decoder = AutoConfig.from_pretrained( decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True ) if isinstance(decoder_config, MusicgenConfig): decoder_config = decoder_config.decoder if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: logger.info( f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention" f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if" f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers." ) decoder_config.is_decoder = True decoder_config.add_cross_attention = True kwargs_decoder["config"] = decoder_config if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False: logger.warning( f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. " f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, " "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` " "passed to `.from_sub_models_pretrained(...)` are set to `True` or do not pass a " "`decoder_config` to `.from_sub_models_pretrained(...)`" ) decoder = MusicgenForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) # instantiate config with corresponding kwargs config = MusicgenConfig.from_sub_models_config( text_encoder.config, audio_encoder.config, decoder.config, **kwargs ) return cls(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder, config=config) @add_start_docstrings_to_model_forward(MUSICGEN_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.BoolTensor] = None, input_values: Optional[torch.FloatTensor] = None, padding_mask: Optional[torch.BoolTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, past_key_values: Tuple[Tuple[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple, Seq2SeqLMOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration >>> import torch >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small") >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> inputs = processor( ... 
text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"], ... padding=True, ... return_tensors="pt", ... ) >>> pad_token_id = model.generation_config.pad_token_id >>> decoder_input_ids = ( ... torch.ones((inputs.input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long) ... * pad_token_id ... ) >>> logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits >>> logits.shape # (bsz * num_codebooks, tgt_len, vocab_size) torch.Size([8, 1, 2048]) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict kwargs_text_encoder = { argument[len("text_encoder_")]: value for argument, value in kwargs.items() if argument.startswith("text_encoder_") } kwargs_audio_encoder = { argument[len("audio_encoder_")]: value for argument, value in kwargs.items() if argument.startswith("audio_encoder_") } kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } if encoder_outputs is None: encoder_outputs = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs_text_encoder, ) elif isinstance(encoder_outputs, tuple): encoder_outputs = BaseModelOutput(*encoder_outputs) encoder_hidden_states = encoder_outputs[0] # optionally project encoder_hidden_states if ( self.text_encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None ): encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) if attention_mask is not None: encoder_hidden_states = encoder_hidden_states * attention_mask[..., None] if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None): decoder_input_ids = shift_tokens_right( labels, self.config.decoder.pad_token_id, self.config.decoder.decoder_start_token_id ) elif decoder_input_ids is None and decoder_inputs_embeds is None: audio_encoder_outputs = self.audio_encoder( input_values=input_values, padding_mask=padding_mask, **kwargs_audio_encoder, ) audio_codes = audio_encoder_outputs.audio_codes frames, bsz, codebooks, seq_len = audio_codes.shape if frames != 1: raise ValueError( f"Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is " "disabled by setting `chunk_length=None` in the audio encoder." 
) if self.config.decoder.audio_channels == 2 and audio_codes.shape[2] == self.decoder.num_codebooks // 2: # mono input through encodec that we convert to stereo audio_codes = audio_codes.repeat_interleave(2, dim=2) decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, past_key_values=past_key_values, return_dict=return_dict, labels=labels, **kwargs_decoder, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqLMOutput( loss=decoder_outputs.loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_attention_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, decoder_delay_pattern_mask=None, guidance_scale=None, **kwargs, ): # Overwritten -- MusicGen has custom processing if decoder_delay_pattern_mask is None: decoder_input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask( decoder_input_ids, self.generation_config.pad_token_id, max_length=self.generation_config.max_length, ) # apply the delay pattern mask decoder_input_ids = self.decoder.apply_delay_pattern_mask(decoder_input_ids, decoder_delay_pattern_mask) if guidance_scale is not None and guidance_scale > 1: # for classifier free guidance we need to replicate the decoder args across the batch dim (we'll split these # before sampling) decoder_input_ids = decoder_input_ids.repeat((2, 1)) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.repeat((2, 1)) if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } def _prepare_decoder_input_ids_for_generation( self, batch_size: int, model_input_name: str, model_kwargs: Dict[str, torch.Tensor], decoder_start_token_id: int = None, bos_token_id: int = None, device: torch.device = None, ) -> Tuple[torch.LongTensor, Dict[str, torch.Tensor]]: """Prepares `decoder_input_ids` for generation with encoder-decoder models""" # 1. Check whether the user has defined `decoder_input_ids` manually. 
To facilitate in terms of input naming, # we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input. if model_kwargs is not None and "decoder_input_ids" in model_kwargs: decoder_input_ids = model_kwargs.pop("decoder_input_ids") elif "input_ids" in model_kwargs and model_input_name != "input_ids": decoder_input_ids = model_kwargs.pop("input_ids") else: decoder_input_ids = None # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that. decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) if device is None: device = self.device decoder_input_ids_start = ( torch.ones((batch_size * self.decoder.num_codebooks, 1), dtype=torch.long, device=device) * decoder_start_token_id ) # no user input -> use decoder_start_token_id as decoder_input_ids if decoder_input_ids is None: decoder_input_ids = decoder_input_ids_start # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust # decoder_attention_mask if provided) elif (decoder_input_ids[..., 0] != decoder_start_token_id).all().item(): decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1) if "decoder_attention_mask" in model_kwargs: decoder_attention_mask = model_kwargs["decoder_attention_mask"] decoder_attention_mask = torch.cat( (torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask), dim=-1, ) model_kwargs["decoder_attention_mask"] = decoder_attention_mask return decoder_input_ids, model_kwargs def _prepare_text_encoder_kwargs_for_generation( self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str], generation_config: GenerationConfig, ) -> Dict[str, Any]: # 1. get text encoder encoder = self.get_text_encoder() # Compatibility with Accelerate big model inference: we need the encoder to outputs stuff on the same device # as the inputs. if hasattr(encoder, "_hf_hook"): encoder._hf_hook.io_same_device = True # 2. Prepare encoder args and encoder kwargs from model kwargs. irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not any(argument.startswith(p) for p in irrelevant_prefix) } encoder_signature = set(inspect.signature(encoder.forward).parameters) encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature if not encoder_accepts_wildcard: encoder_kwargs = { argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature } encoder_kwargs["output_attentions"] = generation_config.output_attentions encoder_kwargs["output_hidden_states"] = generation_config.output_hidden_states guidance_scale = generation_config.guidance_scale # 3. 
make sure that encoder returns `ModelOutput` model_input_name = model_input_name if model_input_name is not None else self.text_encoder.main_input_name encoder_kwargs["return_dict"] = True encoder_kwargs[model_input_name] = inputs_tensor last_hidden_state = encoder(**encoder_kwargs).last_hidden_state # for classifier free guidance we need to add a 'null' input to our encoder hidden states if guidance_scale is not None and guidance_scale > 1: last_hidden_state = torch.concatenate([last_hidden_state, torch.zeros_like(last_hidden_state)], dim=0) if "attention_mask" in model_kwargs: model_kwargs["attention_mask"] = torch.concatenate( [model_kwargs["attention_mask"], torch.zeros_like(model_kwargs["attention_mask"])], dim=0 ) model_kwargs["encoder_outputs"] = BaseModelOutput(last_hidden_state=last_hidden_state) return model_kwargs def _prepare_audio_encoder_kwargs_for_generation( self, input_values, model_kwargs, model_input_name: Optional[str] = None ): # 1. get audio encoder encoder = self.get_audio_encoder() # Compatibility with Accelerate big model inference: we need the encoder to outputs stuff on the same device # as the inputs. if hasattr(encoder, "_hf_hook"): encoder._hf_hook.io_same_device = True # 2. Prepare encoder args and encoder kwargs from model kwargs. irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not any(argument.startswith(p) for p in irrelevant_prefix) } encoder_signature = set(inspect.signature(encoder.forward).parameters) encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature if not encoder_accepts_wildcard: encoder_kwargs = { argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature } # 3. make sure that encoder returns `ModelOutput` model_input_name = model_input_name if model_input_name is not None else self.audio_encoder.main_input_name encoder_kwargs["return_dict"] = True if self.decoder.config.audio_channels == 1: encoder_kwargs[model_input_name] = input_values audio_encoder_outputs = encoder.encode(**encoder_kwargs) audio_codes = audio_encoder_outputs.audio_codes audio_scales = audio_encoder_outputs.audio_scales frames, bsz, codebooks, seq_len = audio_codes.shape else: if input_values.shape[1] != 2: raise ValueError( f"Expected stereo audio (2-channels) but example has {input_values.shape[1]} channel." ) encoder_kwargs[model_input_name] = input_values[:, :1, :] audio_encoder_outputs_left = encoder.encode(**encoder_kwargs) audio_codes_left = audio_encoder_outputs_left.audio_codes audio_scales_left = audio_encoder_outputs_left.audio_scales encoder_kwargs[model_input_name] = input_values[:, 1:, :] audio_encoder_outputs_right = encoder.encode(**encoder_kwargs) audio_codes_right = audio_encoder_outputs_right.audio_codes audio_scales_right = audio_encoder_outputs_right.audio_scales frames, bsz, codebooks, seq_len = audio_codes_left.shape # copy alternating left/right channel codes into stereo codebook audio_codes = audio_codes_left.new_ones((frames, bsz, 2 * codebooks, seq_len)) audio_codes[:, :, ::2, :] = audio_codes_left audio_codes[:, :, 1::2, :] = audio_codes_right if audio_scales_left != [None] or audio_scales_right != [None]: audio_scales = torch.stack([audio_scales_left, audio_scales_right], dim=1) else: audio_scales = [None] * bsz if frames != 1: raise ValueError( f"Expected 1 frame in the audio code outputs, got {frames} frames. 
Ensure chunking is " "disabled by setting `chunk_length=None` in the audio encoder." ) decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len) model_kwargs["decoder_input_ids"] = decoder_input_ids model_kwargs["audio_scales"] = audio_scales return model_kwargs def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.decoder.pad_token_id, self.config.decoder.bos_token_id) def resize_token_embeddings(self, *args, **kwargs): raise NotImplementedError( "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the" " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or" " model.decoder.resize_token_embeddings(...))" ) def freeze_audio_encoder(self): """ Freeze the audio encoder weights. """ for param in self.audio_encoder.parameters(): param.requires_grad = False self.audio_encoder._requires_grad = False def freeze_text_encoder(self): """ Freeze the text encoder weights. """ for param in self.text_encoder.parameters(): param.requires_grad = False self.text_encoder._requires_grad = False def _maybe_initialize_input_ids_for_generation( self, inputs: Optional[torch.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> torch.LongTensor: """Initializes input ids for generation, if necessary.""" if inputs is not None: return inputs encoder_outputs = model_kwargs.get("encoder_outputs") if encoder_outputs is not None: # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding shape = encoder_outputs[0].size()[:-1] return torch.ones(shape, dtype=torch.long, device=self.device) * -100 if bos_token_id is None: raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with # soft-prompting or in multimodal implementations built on top of decoder-only language models. batch_size = 1 for value in model_kwargs.values(): if isinstance(value, torch.Tensor): batch_size = value.shape[0] break return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id def _get_decoder_start_token_id( self, decoder_start_token_id: Union[int, List[int]] = None, bos_token_id: int = None ) -> int: decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.generation_config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif bos_token_id is not None: return bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." ) @torch.no_grad() def generate( self, inputs: Optional[torch.Tensor] = None, generation_config: Optional[GenerationConfig] = None, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, synced_gpus: Optional[bool] = None, streamer: Optional["BaseStreamer"] = None, **kwargs, ): """ Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. 
You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a generation config. If a stopping criteria is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed to avoid deadlocking with `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3). streamer (`BaseStreamer`, *optional*): Streamer object that will be used to stream the generated sequences. Generated tokens are passed through `streamer.put(token_ids)` and the streamer is responsible for any further processing. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateDecoderOnlyOutput`], - [`~generation.GenerateBeamDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ # 1. 
Handle `generation_config` and kwargs that might update it, and validate the resulting objects if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs generation_config.validate() self._validate_model_kwargs(model_kwargs.copy()) if model_kwargs.get("encoder_outputs") is not None and type(model_kwargs["encoder_outputs"]) is tuple: # wrap the unconditional outputs as a BaseModelOutput for compatibility with the rest of generate model_kwargs["encoder_outputs"] = BaseModelOutput(last_hidden_state=model_kwargs["encoder_outputs"][0]) # 2. Set generation parameters if not already defined logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() requires_attention_mask = "encoder_outputs" not in model_kwargs kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None # 3. Define model inputs inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs( inputs, generation_config.bos_token_id, model_kwargs ) batch_size = inputs_tensor.shape[0] self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=inputs_tensor.device) # 4. Define other model kwargs model_kwargs["use_cache"] = generation_config.use_cache model_kwargs["guidance_scale"] = generation_config.guidance_scale if model_kwargs.get("attention_mask", None) is None and requires_attention_mask: model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( inputs_tensor, generation_config, model_kwargs ) if "encoder_outputs" not in model_kwargs: # encoder_outputs are created and added to `model_kwargs` model_kwargs = self._prepare_text_encoder_kwargs_for_generation( inputs_tensor, model_kwargs, model_input_name, generation_config ) if "decoder_input_ids" not in model_kwargs and "input_values" in model_kwargs: model_kwargs = self._prepare_audio_encoder_kwargs_for_generation( model_kwargs["input_values"], model_kwargs, ) # 5. Prepare `input_ids` which will be used for auto-regressive generation input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation( batch_size=batch_size, model_input_name=model_input_name, model_kwargs=model_kwargs, decoder_start_token_id=generation_config._decoder_start_token_tensor, bos_token_id=generation_config._bos_token_tensor, device=inputs_tensor.device, ) # 6. Prepare `max_length` depending on other stopping criteria. 
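        # (note: the decoder delay pattern mask built below is padded out to `generation_config.max_length`,
        # so the final generation length has to be resolved at this step, before the mask is constructed)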
input_ids_length = input_ids.shape[-1] has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None generation_config = self._prepare_generated_length( generation_config=generation_config, has_default_max_length=has_default_max_length, has_default_min_length=has_default_min_length, model_input_name=model_input_name, inputs_tensor=inputs_tensor, input_ids_length=input_ids_length, ) # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to MusicGen) input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask( input_ids, pad_token_id=generation_config._decoder_start_token_tensor, max_length=generation_config.max_length, ) # stash the delay mask so that we don't have to recompute in each forward pass model_kwargs["decoder_delay_pattern_mask"] = decoder_delay_pattern_mask # input_ids are ready to be placed on the streamer (if used) if streamer is not None: streamer.put(input_ids.cpu()) # 7. determine generation mode generation_mode = generation_config.get_generation_mode() # 8. prepare batched CFG externally (to enable coexistance with the unbatched CFG) if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1: logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)) generation_config.guidance_scale = None # 9. prepare distribution pre_processing samplers logits_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids_length, encoder_input_ids=inputs_tensor, prefix_allowed_tokens_fn=None, logits_processor=logits_processor, device=input_ids.device, ) # 10. prepare stopping criteria stopping_criteria = self._get_stopping_criteria( generation_config=generation_config, stopping_criteria=stopping_criteria ) if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH): # expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 11. run sample outputs = self._sample( input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, generation_config=generation_config, synced_gpus=synced_gpus, streamer=streamer, **model_kwargs, ) else: raise ValueError( "Got incompatible mode for generation, should be one of greedy or sampling. " "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`." ) if generation_config.return_dict_in_generate: output_ids = outputs.sequences else: output_ids = outputs # apply the pattern mask to the final ids output_ids = self.decoder.apply_delay_pattern_mask(output_ids, model_kwargs["decoder_delay_pattern_mask"]) # revert the pattern delay mask by filtering the pad token id output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape( batch_size, self.decoder.num_codebooks, -1 ) # append the frame dimension back to the audio codes output_ids = output_ids[None, ...] 
        audio_scales = model_kwargs.get("audio_scales")
        if audio_scales is None:
            audio_scales = [None] * batch_size

        if self.decoder.config.audio_channels == 1:
            output_values = self.audio_encoder.decode(
                output_ids,
                audio_scales=audio_scales,
            ).audio_values
        else:
            codec_outputs_left = self.audio_encoder.decode(output_ids[:, :, ::2, :], audio_scales=audio_scales)
            output_values_left = codec_outputs_left.audio_values

            codec_outputs_right = self.audio_encoder.decode(output_ids[:, :, 1::2, :], audio_scales=audio_scales)
            output_values_right = codec_outputs_right.audio_values

            output_values = torch.cat([output_values_left, output_values_right], dim=1)

        if generation_config.return_dict_in_generate:
            outputs.sequences = output_values
            return outputs
        else:
            return output_values

    def get_unconditional_inputs(self, num_samples=1):
        """
        Helper function to get null inputs for unconditional generation, enabling the model to be used without the
        feature extractor or tokenizer.

        Args:
            num_samples (int, *optional*):
                Number of audio samples to unconditionally generate.

        Example:
        ```python
        >>> from transformers import MusicgenForConditionalGeneration

        >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")

        >>> # get the unconditional (or 'null') inputs for the model
        >>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1)

        >>> # larger `max_new_tokens` yields longer audio samples, at the expense of longer inference
        >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)
        ```"""
        last_hidden_state = torch.zeros(
            (num_samples, 1, self.config.text_encoder.hidden_size), device=self.device, dtype=self.dtype
        )

        attention_mask = torch.zeros((num_samples, 1), device=self.device, dtype=torch.long)

        return MusicgenUnconditionalInput(
            encoder_outputs=(last_hidden_state,),
            attention_mask=attention_mask,
            guidance_scale=1.0,
        )


__all__ = ["MusicgenForConditionalGeneration", "MusicgenForCausalLM", "MusicgenModel", "MusicgenPreTrainedModel"]
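# Usage sketch (illustrative only, not executed on import): end-to-end text-conditional generation with the
# composite model, mirroring the docstring examples above. The checkpoint id and generation arguments are the
# commonly documented ones, not requirements of this module.
#
#     from transformers import AutoProcessor, MusicgenForConditionalGeneration
#
#     processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
#     model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
#
#     inputs = processor(text=["lo-fi beat with mellow piano"], padding=True, return_tensors="pt")
#     # `generate` on the composite model returns decoded waveforms; guidance_scale > 1 enables classifier-free guidance
#     audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
#     sampling_rate = model.config.audio_encoder.sampling_rate  # sample rate of the returned waveform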
transformers/src/transformers/models/musicgen/modeling_musicgen.py/0
{ "file_path": "transformers/src/transformers/models/musicgen/modeling_musicgen.py", "repo_id": "transformers", "token_count": 57997 }
125
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Example for running: 0. Cp ckpts to local aws s3 cp --recursive s3://ai2-llm/checkpoints/OLMoE/olmoe-8x1b-newhp-newds-final-annealFrom1200000/step23842 /data/niklas/llm/checkpoints/olmoe-8x1b-newhp-newds-final-annealFrom1200000_step23842 1. Unshard your OLMoE checkpoint using https://github.com/allenai/OLMo/blob/7d63fe09d23cf23714da5aa633a44a90180195da/scripts/unshard.py python OLMo/scripts/unshard.py /data/niklas/llm/checkpoints/23485/step954000 /data/niklas/llm/checkpoints/1b-954000-unsharded --model-only python OLMo/scripts/unshard.py /data/niklas/llm/checkpoints/23485/step954000 /data/niklas/llm/checkpoints/1b-954000-unsharded --model-only python OLMo/scripts/unshard.py /data/niklas/llm/checkpoints/olmoe-8x1b-newhp-newds-final-annealFrom1200000_step23842 /data/niklas/llm/checkpoints/olmoe-8x1b-newhp-newds-final-annealFrom1200000_step23842-unsharded --model-only 2. Convert to transformers rm -rf olmoe; mkdir olmoe; python /data/niklas/transformers/src/transformers/models/olmoe/convert_olmoe_weights_to_hf.py --input_dir /data/niklas/llm/checkpoints/olmoe-8x1b-newhp-newds-final-annealFrom1200000_step23842-unsharded --tokenizer_json_path /data/niklas/llm/checkpoints/olmoe-step1200000-unsharded/tokenizer.json --output_dir olmoe 3. Load model via: ``` from transformers import OlmoeForCausalLM, AutoTokenizer import torch model = OlmoeForCausalLM.from_pretrained("../transformers/olmoe", torch_dtype=torch.bfloat16).cuda() model = OlmoeForCausalLM.from_pretrained("../transformers/olmoe").cuda() tokenizer = AutoTokenizer.from_pretrained("../transformers/olmoe") inputs = tokenizer("Bitcoin is", return_tensors="pt") inputs = {k: v.cuda() for k, v in inputs.items()} out = model.generate(**inputs, max_length=64) print(tokenizer.decode(out[0])) # > # Bitcoin is a digital currency that is created and held electronically. No one controls it. Bitcoins aren’t printed, like dollars or euros – they’re produced by people and businesses running computers all around the world, using software that solves mathematical # Or quick sanity check: o = model(torch.tensor([[0, 1]]).cuda()) # If the checkpoint is not converted to BF16 but kept in FP32: # > # Bitcoin is a digital currency that is not controlled by any central authority. It is a peer-to-peer payment system that allows users to send and receive payments from anywhere in the world. Bitcoin is also known as a cryptocurrency because it uses cryptography to secure transactions and prevent fraud. ``` Note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). 
Compare with OLMo codebase: ``` from olmo.model import OLMo import torch model = OLMo.from_checkpoint("/data/niklas/llm/checkpoints/olmoe-step1200000-unsharded-pt") model = model.cuda() model = model.to(torch.bfloat16) from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("../transformers/olmoe") inputs = tokenizer("Bitcoin is", return_tensors="pt") inputs = {k: v.cuda() for k, v in inputs.items()} out = model.generate(**inputs) print(tokenizer.decode(out[0][0][0])) # Bitcoin is a digital currency that is created and held electronically. No one controls it. Bitcoins aren’t printed, like dollars or euros – they’re produced by people and businesses running computers all around the world, using software that solves mathematical problems. It’s the first example of a growing category of money # Or quick sanity check: o = model(torch.tensor([[0, 1]]).cuda()) ``` """ import argparse import gc import json import os import shutil from pathlib import Path import torch import yaml from tokenizers import Tokenizer from transformers import OlmoeConfig, OlmoeForCausalLM from transformers.models.gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256): return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of) def read_json(path): with open(path, "r") as f: return json.load(f) def write_json(text, path): with open(path, "w") as f: json.dump(text, f) def write_model(model_path, input_base_path, tokenizer_path=None, safe_serialization=True, fix_eos_token_id=True): os.makedirs(model_path, exist_ok=True) tmp_model_path = os.path.join(model_path, "tmp") os.makedirs(tmp_model_path, exist_ok=True) config_path = Path(input_base_path) / "config.yaml" olmoe_config = yaml.safe_load(config_path.read_text())["model"] if fix_eos_token_id: olmoe_config["eos_token_id"] = 50279 n_layers = olmoe_config["n_layers"] n_heads = olmoe_config["n_heads"] dim = olmoe_config["d_model"] dims_per_head = dim // n_heads base = 10000.0 inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) max_position_embeddings = olmoe_config["max_sequence_length"] vocab_size = olmoe_config.get("embedding_size", olmoe_config["vocab_size"]) if olmoe_config.get("n_kv_heads", None) is not None: num_key_value_heads = olmoe_config["n_kv_heads"] # for GQA / MQA elif olmoe_config["multi_query_attention"]: # compatibility with other checkpoints num_key_value_heads = 1 else: num_key_value_heads = n_heads print(f"Fetching all parameters from the checkpoint at {input_base_path}.") # Not sharded loaded = torch.load(os.path.join(input_base_path, "model.pt"), map_location="cpu") param_count = 0 index_dict = {"weight_map": {}} for layer_i in range(n_layers): filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" fused_dims = [dim, dims_per_head * num_key_value_heads, dims_per_head * num_key_value_heads] q_proj_weight, k_proj_weight, v_proj_weight = torch.split( loaded[f"transformer.blocks.{layer_i}.att_proj.weight"], fused_dims, dim=0 ) state_dict = { f"model.layers.{layer_i}.self_attn.q_proj.weight": q_proj_weight, f"model.layers.{layer_i}.self_attn.k_proj.weight": k_proj_weight, f"model.layers.{layer_i}.self_attn.v_proj.weight": v_proj_weight, f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"transformer.blocks.{layer_i}.attn_out.weight"], f"model.layers.{layer_i}.self_attn.q_norm.weight": loaded[f"transformer.blocks.{layer_i}.q_norm.weight"], 
f"model.layers.{layer_i}.self_attn.k_norm.weight": loaded[f"transformer.blocks.{layer_i}.k_norm.weight"], f"model.layers.{layer_i}.mlp.gate.weight": loaded[f"transformer.blocks.{layer_i}.ffn.router.layer.weight"], f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"transformer.blocks.{layer_i}.attn_norm.weight"], f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[ f"transformer.blocks.{layer_i}.ff_norm.weight" ], } num_experts = loaded[f"transformer.blocks.{layer_i}.ffn.router.layer.weight"].shape[0] dim_per_expert = loaded[f"transformer.blocks.{layer_i}.ffn.experts.mlp.w1"].shape[0] // num_experts for expert_i in range(num_experts): state_dict[f"model.layers.{layer_i}.mlp.experts.{expert_i}.gate_proj.weight"] = loaded[ f"transformer.blocks.{layer_i}.ffn.experts.mlp.w1" ][dim_per_expert * expert_i : dim_per_expert * (expert_i + 1), :] state_dict[f"model.layers.{layer_i}.mlp.experts.{expert_i}.up_proj.weight"] = loaded[ f"transformer.blocks.{layer_i}.ffn.experts.mlp.v1" ][dim_per_expert * expert_i : dim_per_expert * (expert_i + 1), :] state_dict[f"model.layers.{layer_i}.mlp.experts.{expert_i}.down_proj.weight"] = loaded[ f"transformer.blocks.{layer_i}.ffn.experts.mlp.w2" ][dim_per_expert * expert_i : dim_per_expert * (expert_i + 1), :].T.contiguous() state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() torch.save(state_dict, os.path.join(tmp_model_path, filename)) filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" # Unsharded state_dict = { "model.embed_tokens.weight": loaded["transformer.wte.weight"], "lm_head.weight": loaded["transformer.ff_out.weight"], "model.norm.weight": loaded["transformer.ln_f.weight"], } for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() torch.save(state_dict, os.path.join(tmp_model_path, filename)) # Write configs index_dict["metadata"] = {"total_size": param_count * 2} write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json")) config = OlmoeConfig( vocab_size=vocab_size, hidden_size=dim, intermediate_size=dim_per_expert, num_hidden_layers=n_layers, num_attention_heads=n_heads, num_key_value_heads=num_key_value_heads, max_position_embeddings=max_position_embeddings, pad_token_id=olmoe_config["pad_token_id"], bos_token_id=None, eos_token_id=olmoe_config["eos_token_id"], tie_word_embeddings=olmoe_config["weight_tying"], rope_theta=base, clip_qkv=olmoe_config.get("clip_qkv"), ) config.save_pretrained(tmp_model_path) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() if tokenizer_path is not None: _write_tokenizer(model_path, config, tokenizer_path, fix_eos_token_id) print("Loading the checkpoint in a OLMoE model.") model = OlmoeForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.bfloat16) # Avoid saving this as part of the config. 
del model.config._name_or_path print("Saving in the Transformers format.") model.save_pretrained(model_path, safe_serialization=safe_serialization) shutil.rmtree(tmp_model_path) def _write_tokenizer( output_path: Path, config: OlmoeConfig, input_tokenizer_path: Path, fix_eos_token_id: bool = True ) -> None: print(f"Saving a {GPTNeoXTokenizerFast.__name__} to {output_path}.") base_tokenizer = Tokenizer.from_file(str(input_tokenizer_path)) eos_token_id = config.eos_token_id if config.eos_token_id is not None else base_tokenizer.get_vocab_size() - 1 pad_token_id = config.pad_token_id if config.pad_token_id is not None else eos_token_id if fix_eos_token_id and eos_token_id == 0: # Fixing a bug in OLMo where eos token id was incorrectly set print("Changing eos_token_id from 0 to 50279.") eos_token_id = 50279 tokenizer = GPTNeoXTokenizerFast( tokenizer_object=base_tokenizer, eos_token=base_tokenizer.decode([eos_token_id], skip_special_tokens=False), pad_token=base_tokenizer.decode([pad_token_id], skip_special_tokens=False), unk_token=None, bos_token=None, ) tokenizer.save_pretrained(output_path) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--input_dir", required=True, help="Location of OLMoE weights, which contains config.yaml and model.pt.", ) parser.add_argument( "--tokenizer_json_path", default=None, help="Location of OLMoE tokenizer json file.", ) parser.add_argument( "--output_dir", required=True, help="Location to write HF model and tokenizer", ) parser.add_argument( "--no_fix_eos_token_id", action="store_false", dest="fix_eos_token_id", help="If set, does not change eos token id from 0 to 50279 if it is 0. Changing 0 to 50279 is a bug fix, so use this option with care.", ) parser.add_argument( "--safe_serialization", type=bool, default=True, help="Whether or not to save using `safetensors`." ) args = parser.parse_args() write_model( model_path=args.output_dir, input_base_path=args.input_dir, safe_serialization=args.safe_serialization, tokenizer_path=args.tokenizer_json_path, fix_eos_token_id=args.fix_eos_token_id, ) if __name__ == "__main__": main()
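# Example invocation (illustrative; the paths below are placeholders, the flags match the argparse definitions above):
#
#     python convert_olmoe_weights_to_hf.py \
#         --input_dir /path/to/olmoe-unsharded \
#         --tokenizer_json_path /path/to/tokenizer.json \
#         --output_dir ./olmoe-hf
#
# `--input_dir` must contain the unsharded `config.yaml` and `model.pt`; pass `--no_fix_eos_token_id` to keep the
# original eos token id instead of applying the 0 -> 50279 fix.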
transformers/src/transformers/models/olmoe/convert_olmoe_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/olmoe/convert_olmoe_weights_to_hf.py", "repo_id": "transformers", "token_count": 5305 }
126
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch OpenAI GPT model.""" import json import math import os from dataclasses import dataclass from typing import Any, Dict, Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import gelu_new, silu from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel, SequenceSummary from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_openai import OpenAIGPTConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "openai-community/openai-gpt" _CONFIG_FOR_DOC = "OpenAIGPTConfig" def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path): """Load tf pre-trained weights in a pytorch model (from NumPy arrays here)""" import re import numpy as np if ".ckpt" in openai_checkpoint_folder_path: openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path) logger.info(f"Loading weights from {openai_checkpoint_folder_path}") with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle: names = json.load(names_handle) with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle: shapes = json.load(shapes_handle) offsets = np.cumsum([np.prod(shape) for shape in shapes]) init_params = [np.load(openai_checkpoint_folder_path + f"/params_{n}.npy") for n in range(10)] init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1] init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)] # This was used when we had a single embedding matrix for positions and tokens # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0) # del init_params[1] init_params = [arr.squeeze() for arr in init_params] # Check that the token and position embeddings weight dimensions map those of the init parameters. 
if model.tokens_embed.weight.shape != init_params[1].shape: raise ValueError( f"tokens_embed.weight.shape: {model.tokens_embed.weight.shape} does not match init_param[1].shape:" f" {init_params[1].shape}" ) if model.positions_embed.weight.shape != init_params[0].shape: raise ValueError( f"positions_embed.weight.shape: {model.positions_embed.weight.shape} does not match init_param[0].shape:" f" {init_params[0].shape}" ) model.tokens_embed.weight.data = torch.from_numpy(init_params[1]) model.positions_embed.weight.data = torch.from_numpy(init_params[0]) names.pop(0) # Pop position and token embedding arrays init_params.pop(0) init_params.pop(0) for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]): name = name[6:] # skip "model/" if name[-2:] != ":0": raise ValueError(f"Layer {name} does not end with :0") name = name[:-2] name = name.split("/") pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+\d+", m_name): scope_names = re.split(r"(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "g": pointer = getattr(pointer, "weight") elif scope_names[0] == "b": pointer = getattr(pointer, "bias") elif scope_names[0] == "w": pointer = getattr(pointer, "weight") else: pointer = getattr(pointer, scope_names[0]) if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] # Ensure that the pointer and array have compatible shapes. if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model ACT_FNS = {"relu": nn.ReLU(), "silu": silu, "gelu": gelu_new, "swish": silu} class Attention(nn.Module): def __init__(self, nx, n_positions, config, scale=False): super().__init__() n_state = nx # in Attention: n_state=768 (nx=n_embd) # [switch nx => n_state from Block to Attention to keep identical to TF implementation] if n_state % config.n_head != 0: raise ValueError(f"Attention n_state shape: {n_state} must be divisible by config.n_head {config.n_head}") self.register_buffer( "bias", torch.tril(torch.ones(n_positions, n_positions)).view(1, 1, n_positions, n_positions), persistent=False, ) self.n_head = config.n_head self.split_size = n_state self.scale = scale self.c_attn = Conv1D(n_state * 3, nx) self.c_proj = Conv1D(n_state, nx) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_head, self.split_size // self.n_head, self.pruned_heads ) index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)]) # Prune conv1d layers self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) # Update hyper params self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads)) self.n_head = self.n_head - len(heads) self.pruned_heads = self.pruned_heads.union(heads) def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False): w = torch.matmul(q, k) if self.scale: w = w / math.sqrt(v.size(-1)) # w = w * self.bias + -1e9 * (1 - self.bias) # TF implementation method: mask_attn_weights # XD: self.b may be larger than w, so we need to crop it b = self.bias[:, :, : w.size(-2), : w.size(-1)] w = w * b + -1e4 * (1 - b) if attention_mask is not None: # Apply the attention 
mask w = w + attention_mask w = nn.functional.softmax(w, dim=-1) w = self.attn_dropout(w) # Mask heads if we want to if head_mask is not None: w = w * head_mask outputs = [torch.matmul(w, v)] if output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = x.permute(0, 2, 1, 3).contiguous() new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),) return x.view(*new_x_shape) # in Tensorflow implementation: fct merge_states def split_heads(self, x, k=False): new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head) x = x.view(*new_x_shape) # in Tensorflow implementation: fct split_states if k: return x.permute(0, 2, 3, 1) else: return x.permute(0, 2, 1, 3) def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False): x = self.c_attn(x) query, key, value = x.split(self.split_size, dim=2) query = self.split_heads(query) key = self.split_heads(key, k=True) value = self.split_heads(value) attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions) a = attn_outputs[0] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a) outputs = [a] + attn_outputs[1:] return outputs # a, (attentions) class MLP(nn.Module): def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd) super().__init__() nx = config.n_embd self.c_fc = Conv1D(n_state, nx) self.c_proj = Conv1D(nx, n_state) self.act = ACT_FNS[config.afn] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, x): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) return self.dropout(h2) class Block(nn.Module): def __init__(self, n_positions, config, scale=False): super().__init__() nx = config.n_embd self.attn = Attention(nx, n_positions, config, scale) self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon) self.mlp = MLP(4 * nx, config) self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon) def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False): attn_outputs = self.attn( x, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, ) a = attn_outputs[0] n = self.ln_1(x + a) m = self.mlp(n) h = self.ln_2(n + m) outputs = [h] + attn_outputs[1:] return outputs class OpenAIGPTPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = OpenAIGPTConfig load_tf_weights = load_tf_weights_in_openai_gpt base_model_prefix = "transformer" def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, Conv1D)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class OpenAIGPTDoubleHeadsModelOutput(ModelOutput): """ Base class for outputs of models predicting if two sentences are consecutive or not. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. 
mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided): Multiple choice classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None mc_loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mc_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None OPENAI_GPT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`OpenAIGPTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ OPENAI_GPT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.", OPENAI_GPT_START_DOCSTRING, ) class OpenAIGPTModel(OpenAIGPTPreTrainedModel): def __init__(self, config): super().__init__(config) self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd) self.positions_embed = nn.Embedding(config.n_positions, config.n_embd) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([Block(config.n_positions, config, scale=True) for _ in range(config.n_layer)]) self.register_buffer("position_ids", torch.arange(config.n_positions), persistent=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.tokens_embed def set_input_embeddings(self, new_embeddings): self.tokens_embed = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if position_ids is None: # Code is different from when we had a single embedding matrix from position and token embeddings position_ids = self.position_ids[None, : input_shape[-1]] # Attention mask. if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
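# Illustrative note (not from the original source): for a float32 model, a 2D mask of [[1, 1, 0]] becomes, after the two lines below, additive biases of [[0.0, 0.0, ~-3.4e38]] (the float32 minimum), so masked positions receive effectively zero weight after the softmax.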
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.tokens_embed(input_ids) position_embeds = self.positions_embed(position_ids) if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) token_type_embeds = self.tokens_embed(token_type_ids) else: token_type_embeds = 0 hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, block in enumerate(self.h): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block(hidden_states, attention_mask, head_mask[i], output_attentions=output_attentions) hidden_states = outputs[0] if output_attentions: all_attentions = all_attentions + (outputs[1],) hidden_states = hidden_states.view(*output_shape) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, ) @add_start_docstrings( """ OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, OPENAI_GPT_START_DOCSTRING, ) class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.transformer = OpenAIGPTModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=lm_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]: # Overwritten -- old model with reduced inputs return {"input_ids": input_ids} @add_start_docstrings( """ OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). """, OPENAI_GPT_START_DOCSTRING, ) class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) config.num_labels = 1 self.transformer = OpenAIGPTModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.multiple_choice_head = SequenceSummary(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=OpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, mc_token_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, mc_labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], OpenAIGPTDoubleHeadsModelOutput]: r""" mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - 1]`. 
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-1, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above) Return: Examples: ```python >>> from transformers import AutoTokenizer, OpenAIGPTDoubleHeadsModel >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt") >>> model = OpenAIGPTDoubleHeadsModel.from_pretrained("openai-community/openai-gpt") >>> tokenizer.add_special_tokens( ... {"cls_token": "[CLS]"} ... ) # Add a [CLS] to the vocabulary (we should train it also!) >>> model.resize_token_embeddings(len(tokenizer)) >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] >>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices >>> mc_token_ids = torch.tensor([input_ids.size(-1) - 1, input_ids.size(-1) - 1]).unsqueeze(0) # Batch size 1 >>> outputs = model(input_ids, mc_token_ids=mc_token_ids) >>> lm_logits = outputs.logits >>> mc_logits = outputs.mc_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1) lm_loss, mc_loss = None, None if mc_labels is not None: loss_fct = CrossEntropyLoss() mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)) if labels is not None: shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits, mc_logits) + transformer_outputs[1:] if mc_loss is not None: output = (mc_loss,) + output return ((lm_loss,) + output) if lm_loss is not None else output return OpenAIGPTDoubleHeadsModelOutput( loss=lm_loss, mc_loss=mc_loss, logits=lm_logits, mc_logits=mc_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ The Original OpenAI GPT Model transformer with a sequence classification head on top (linear layer). [`OpenAIGPTForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, OPENAI_GPT_START_DOCSTRING, ) class OpenAIGPTForSequenceClassification(OpenAIGPTPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = OpenAIGPTModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] # Ensure the batch size is > 1 if there is no padding. if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[range(batch_size), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=pooled_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) __all__ = [ "OpenAIGPTDoubleHeadsModel", "OpenAIGPTForSequenceClassification", "OpenAIGPTLMHeadModel", "OpenAIGPTModel", "OpenAIGPTPreTrainedModel", "load_tf_weights_in_openai_gpt", ]
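# Illustrative usage sketch (kept as a comment; it is not part of the original module). It shows greedy generation with
# `OpenAIGPTLMHeadModel`, which inherits `generate()` from `GenerationMixin`, using the public
# "openai-community/openai-gpt" checkpoint referenced in the docstrings above:
#
#     from transformers import AutoTokenizer, OpenAIGPTLMHeadModel
#
#     tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
#     model = OpenAIGPTLMHeadModel.from_pretrained("openai-community/openai-gpt")
#     inputs = tokenizer("Hello, my dog is cute and", return_tensors="pt")
#     output_ids = model.generate(**inputs, max_new_tokens=20)
#     print(tokenizer.decode(output_ids[0], skip_special_tokens=True))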
transformers/src/transformers/models/openai/modeling_openai.py/0
{ "file_path": "transformers/src/transformers/models/openai/modeling_openai.py", "repo_id": "transformers", "token_count": 16382 }
127
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Perceiver checkpoints originally implemented in Haiku.""" import argparse import json import pickle from pathlib import Path import haiku as hk import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( PerceiverConfig, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverImageProcessor, PerceiverTokenizer, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def prepare_img(): # We will verify our results on an image of a dog url = "https://storage.googleapis.com/perceiver_io/dalmation.jpg" im = Image.open(requests.get(url, stream=True).raw) return im def rename_keys(state_dict, architecture): for name in list(state_dict): param = state_dict.pop(name) # PREPROCESSORS # rename text preprocessor embeddings (for MLM model) name = name.replace("embed/embeddings", "input_preprocessor.embeddings.weight") if name.startswith("trainable_position_encoding/pos_embs"): name = name.replace( "trainable_position_encoding/pos_embs", "input_preprocessor.position_embeddings.weight" ) # rename image preprocessor embeddings (for image classification model with learned position embeddings) name = name.replace("image_preprocessor/~/conv2_d/w", "input_preprocessor.convnet_1x1.weight") name = name.replace("image_preprocessor/~/conv2_d/b", "input_preprocessor.convnet_1x1.bias") name = name.replace( "image_preprocessor/~_build_network_inputs/trainable_position_encoding/pos_embs", "input_preprocessor.position_embeddings.position_embeddings", ) name = name.replace( "image_preprocessor/~_build_network_inputs/position_encoding_projector/linear/w", "input_preprocessor.positions_projection.weight", ) name = name.replace( "image_preprocessor/~_build_network_inputs/position_encoding_projector/linear/b", "input_preprocessor.positions_projection.bias", ) # rename image preprocessor embeddings (for image classification model with conv processing) if "counter" in name or "hidden" in name: continue name = name.replace( "image_preprocessor/~/conv2_d_downsample/~/conv/w", "input_preprocessor.convnet.conv.weight" ) name = name.replace( "image_preprocessor/~/conv2_d_downsample/~/batchnorm/offset", "input_preprocessor.convnet.batchnorm.bias" ) name = name.replace( "image_preprocessor/~/conv2_d_downsample/~/batchnorm/scale", "input_preprocessor.convnet.batchnorm.weight" ) name = name.replace( "image_preprocessor/~/conv2_d_downsample/~/batchnorm/~/mean_ema/average", "input_preprocessor.convnet.batchnorm.running_mean", ) name = name.replace( "image_preprocessor/~/conv2_d_downsample/~/batchnorm/~/var_ema/average", "input_preprocessor.convnet.batchnorm.running_var", ) # rename image preprocessor embeddings (for optical flow model) name = 
name.replace("image_preprocessor/patches_linear/b", "input_preprocessor.conv_after_patches.bias") name = name.replace("image_preprocessor/patches_linear/w", "input_preprocessor.conv_after_patches.weight") # rename multimodal preprocessor embeddings name = name.replace("multimodal_preprocessor/audio_mask_token/pos_embs", "input_preprocessor.mask.audio") name = name.replace("multimodal_preprocessor/audio_padding/pos_embs", "input_preprocessor.padding.audio") name = name.replace("multimodal_preprocessor/image_mask_token/pos_embs", "input_preprocessor.mask.image") name = name.replace("multimodal_preprocessor/image_padding/pos_embs", "input_preprocessor.padding.image") name = name.replace("multimodal_preprocessor/label_mask_token/pos_embs", "input_preprocessor.mask.label") name = name.replace("multimodal_preprocessor/label_padding/pos_embs", "input_preprocessor.padding.label") # DECODERS # rename prefix of decoders # multimodal autoencoding model name = name.replace( "multimodal_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention." ) name = name.replace("multimodal_decoder/~decoder_query/audio_padding/pos_embs", "decoder.padding.audio") name = name.replace("multimodal_decoder/~decoder_query/image_padding/pos_embs", "decoder.padding.image") name = name.replace("multimodal_decoder/~decoder_query/label_padding/pos_embs", "decoder.padding.label") name = name.replace("multimodal_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias") name = name.replace("multimodal_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight") if architecture == "multimodal_autoencoding": name = name.replace( "classification_decoder/~/basic_decoder/~/trainable_position_encoding/pos_embs", "decoder.modalities.label.decoder.output_position_encodings.position_embeddings", ) # flow model name = name.replace( "flow_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention." ) name = name.replace("flow_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight") name = name.replace("flow_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias") # image models name = name.replace( "classification_decoder/~/basic_decoder/~/trainable_position_encoding/pos_embs", "decoder.decoder.output_position_encodings.position_embeddings", ) name = name.replace( "basic_decoder/~/trainable_position_encoding/pos_embs", "decoder.output_position_encodings.position_embeddings", ) name = name.replace( "classification_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention." 
) name = name.replace("classification_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias") name = name.replace("classification_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight") name = name = name.replace("classification_decoder/~/basic_decoder/~/", "decoder.decoder.") name = name.replace("basic_decoder/cross_attention/", "decoder.decoding_cross_attention.") name = name.replace("basic_decoder/~/", "decoder.") # POSTPROCESSORS name = name.replace( "projection_postprocessor/linear/b", "output_postprocessor.modalities.image.classifier.bias" ) name = name.replace( "projection_postprocessor/linear/w", "output_postprocessor.modalities.image.classifier.weight" ) name = name.replace( "classification_postprocessor/linear/b", "output_postprocessor.modalities.label.classifier.bias" ) name = name.replace( "classification_postprocessor/linear/w", "output_postprocessor.modalities.label.classifier.weight" ) name = name.replace("audio_postprocessor/linear/b", "output_postprocessor.modalities.audio.classifier.bias") name = name.replace("audio_postprocessor/linear/w", "output_postprocessor.modalities.audio.classifier.weight") # PERCEIVER MODEL # rename latent embeddings name = name.replace("perceiver_encoder/~/trainable_position_encoding/pos_embs", "embeddings.latents") # rename latent embeddings (for multimodal model) name = name.replace("encoder/~/trainable_position_encoding/pos_embs", "embeddings.latents") # rename prefixes if name.startswith("perceiver_encoder/~/"): if "self_attention" in name: suffix = "self_attends." else: suffix = "" name = name.replace("perceiver_encoder/~/", "encoder." + suffix) if name.startswith("encoder/~/"): if "self_attention" in name: suffix = "self_attends." else: suffix = "" name = name.replace("encoder/~/", "encoder." 
+ suffix) # rename layernorm parameters if "offset" in name: name = name.replace("offset", "bias") if "scale" in name: name = name.replace("scale", "weight") # in HuggingFace, the layernorm in between attention + MLP is just called "layernorm" # rename layernorm in between attention + MLP of cross-attention if "cross_attention" in name and "layer_norm_2" in name: name = name.replace("layer_norm_2", "layernorm") # rename layernorm in between attention + MLP of self-attention if "self_attention" in name and "layer_norm_1" in name: name = name.replace("layer_norm_1", "layernorm") # in HuggingFace, the layernorms for queries + keys are called "layernorm1" and "layernorm2" if "cross_attention" in name and "layer_norm_1" in name: name = name.replace("layer_norm_1", "attention.self.layernorm2") if "cross_attention" in name and "layer_norm" in name: name = name.replace("layer_norm", "attention.self.layernorm1") if "self_attention" in name and "layer_norm" in name: name = name.replace("layer_norm", "attention.self.layernorm1") # rename special characters by dots name = name.replace("-", ".") name = name.replace("/", ".") # rename keys, queries, values and output of attention layers if ("cross_attention" in name or "self_attention" in name) and "mlp" not in name: if "linear.b" in name: name = name.replace("linear.b", "self.query.bias") if "linear.w" in name: name = name.replace("linear.w", "self.query.weight") if "linear_1.b" in name: name = name.replace("linear_1.b", "self.key.bias") if "linear_1.w" in name: name = name.replace("linear_1.w", "self.key.weight") if "linear_2.b" in name: name = name.replace("linear_2.b", "self.value.bias") if "linear_2.w" in name: name = name.replace("linear_2.w", "self.value.weight") if "linear_3.b" in name: name = name.replace("linear_3.b", "output.dense.bias") if "linear_3.w" in name: name = name.replace("linear_3.w", "output.dense.weight") if "self_attention_" in name: name = name.replace("self_attention_", "") if "self_attention" in name: name = name.replace("self_attention", "0") # rename dense layers of 2-layer MLP if "mlp" in name: if "linear.b" in name: name = name.replace("linear.b", "dense1.bias") if "linear.w" in name: name = name.replace("linear.w", "dense1.weight") if "linear_1.b" in name: name = name.replace("linear_1.b", "dense2.bias") if "linear_1.w" in name: name = name.replace("linear_1.w", "dense2.weight") # finally, TRANSPOSE if kernel and not embedding layer, and set value if name[-6:] == "weight" and "embeddings" not in name: param = np.transpose(param) # if batchnorm, we need to squeeze it if "batchnorm" in name: param = np.squeeze(param) if "embedding_decoder" not in name: state_dict["perceiver." + name] = torch.from_numpy(param) else: state_dict[name] = torch.from_numpy(param) @torch.no_grad() def convert_perceiver_checkpoint(pickle_file, pytorch_dump_folder_path, architecture="MLM"): """ Copy/paste/tweak model's weights to our Perceiver structure. 
""" # load parameters as FlatMapping data structure with open(pickle_file, "rb") as f: checkpoint = pickle.loads(f.read()) state = None if isinstance(checkpoint, dict) and architecture in [ "image_classification", "image_classification_fourier", "image_classification_conv", ]: # the image classification_conv checkpoint also has batchnorm states (running_mean and running_var) params = checkpoint["params"] state = checkpoint["state"] else: params = checkpoint # turn into initial state dict state_dict = {} for scope_name, parameters in hk.data_structures.to_mutable_dict(params).items(): for param_name, param in parameters.items(): state_dict[scope_name + "/" + param_name] = param if state is not None: # add state variables for scope_name, parameters in hk.data_structures.to_mutable_dict(state).items(): for param_name, param in parameters.items(): state_dict[scope_name + "/" + param_name] = param # rename keys rename_keys(state_dict, architecture=architecture) # load HuggingFace model config = PerceiverConfig() subsampling = None repo_id = "huggingface/label-files" if architecture == "MLM": config.qk_channels = 8 * 32 config.v_channels = 1280 model = PerceiverForMaskedLM(config) elif "image_classification" in architecture: config.num_latents = 512 config.d_latents = 1024 config.d_model = 512 config.num_blocks = 8 config.num_self_attends_per_block = 6 config.num_cross_attention_heads = 1 config.num_self_attention_heads = 8 config.qk_channels = None config.v_channels = None # set labels config.num_labels = 1000 filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} if architecture == "image_classification": config.image_size = 224 model = PerceiverForImageClassificationLearned(config) elif architecture == "image_classification_fourier": config.d_model = 261 model = PerceiverForImageClassificationFourier(config) elif architecture == "image_classification_conv": config.d_model = 322 model = PerceiverForImageClassificationConvProcessing(config) else: raise ValueError(f"Architecture {architecture} not supported") elif architecture == "optical_flow": config.num_latents = 2048 config.d_latents = 512 config.d_model = 322 config.num_blocks = 1 config.num_self_attends_per_block = 24 config.num_self_attention_heads = 16 config.num_cross_attention_heads = 1 model = PerceiverForOpticalFlow(config) elif architecture == "multimodal_autoencoding": config.num_latents = 28 * 28 * 1 config.d_latents = 512 config.d_model = 704 config.num_blocks = 1 config.num_self_attends_per_block = 8 config.num_self_attention_heads = 8 config.num_cross_attention_heads = 1 config.num_labels = 700 # define dummy inputs + subsampling (as each forward pass is only on a chunk of image + audio data) images = torch.randn((1, 16, 3, 224, 224)) audio = torch.randn((1, 30720, 1)) nchunks = 128 image_chunk_size = np.prod((16, 224, 224)) // nchunks audio_chunk_size = audio.shape[1] // config.samples_per_patch // nchunks # process the first chunk chunk_idx = 0 subsampling = { "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)), "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)), "label": None, } model = PerceiverForMultimodalAutoencoding(config) # set labels filename = "kinetics700-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, 
repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} else: raise ValueError(f"Architecture {architecture} not supported") model.eval() # load weights model.load_state_dict(state_dict) # prepare dummy input input_mask = None if architecture == "MLM": tokenizer = PerceiverTokenizer.from_pretrained("/Users/NielsRogge/Documents/Perceiver/Tokenizer files") text = "This is an incomplete sentence where some words are missing." encoding = tokenizer(text, padding="max_length", return_tensors="pt") # mask " missing.". Note that the model performs much better if the masked chunk starts with a space. encoding.input_ids[0, 51:60] = tokenizer.mask_token_id inputs = encoding.input_ids input_mask = encoding.attention_mask elif architecture in ["image_classification", "image_classification_fourier", "image_classification_conv"]: image_processor = PerceiverImageProcessor() image = prepare_img() encoding = image_processor(image, return_tensors="pt") inputs = encoding.pixel_values elif architecture == "optical_flow": inputs = torch.randn(1, 2, 27, 368, 496) elif architecture == "multimodal_autoencoding": images = torch.randn((1, 16, 3, 224, 224)) audio = torch.randn((1, 30720, 1)) inputs = {"image": images, "audio": audio, "label": torch.zeros((images.shape[0], 700))} # forward pass if architecture == "multimodal_autoencoding": outputs = model(inputs=inputs, attention_mask=input_mask, subsampled_output_points=subsampling) else: outputs = model(inputs=inputs, attention_mask=input_mask) logits = outputs.logits # verify logits if not isinstance(logits, dict): print("Shape of logits:", logits.shape) else: for k, v in logits.items(): print(f"Shape of logits of modality {k}", v.shape) if architecture == "MLM": expected_slice = torch.tensor( [[-11.8336, -11.6850, -11.8483], [-12.8149, -12.5863, -12.7904], [-12.8440, -12.6410, -12.8646]] ) assert torch.allclose(logits[0, :3, :3], expected_slice) masked_tokens_predictions = logits[0, 51:60].argmax(dim=-1).tolist() expected_list = [38, 115, 111, 121, 121, 111, 116, 109, 52] assert masked_tokens_predictions == expected_list print("Greedy predictions:") print(masked_tokens_predictions) print() print("Predicted string:") print(tokenizer.decode(masked_tokens_predictions)) elif architecture in ["image_classification", "image_classification_fourier", "image_classification_conv"]: print("Predicted class:", model.config.id2label[logits.argmax(-1).item()]) # Finally, save files Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pickle_file", type=str, default=None, required=True, help="Path to local pickle file of a Perceiver checkpoint you'd like to convert.\n" "Given the files are in the pickle format, please be wary of passing it files you trust.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model directory, provided as a string.", ) parser.add_argument( "--architecture", default="MLM", type=str, help=""" Architecture, provided as a string. One of 'MLM', 'image_classification', image_classification_fourier', image_classification_fourier', 'optical_flow' or 'multimodal_autoencoding'. 
""", ) args = parser.parse_args() convert_perceiver_checkpoint(args.pickle_file, args.pytorch_dump_folder_path, args.architecture)
transformers/src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py", "repo_id": "transformers", "token_count": 9032 }
128
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Pixtral.""" import math from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, is_valid_image, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments, ) from ...utils import TensorType, is_torch_device, is_torch_dtype, is_vision_available, logging from ...utils.import_utils import requires_backends logger = logging.get_logger(__name__) if is_vision_available(): import PIL class BatchMixFeature(BatchFeature): def to(self, *args, **kwargs) -> "BatchMixFeature": """ Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in different `dtypes` and sending the `BatchFeature` to a different `device`. Args: args (`Tuple`): Will be passed to the `to(...)` function of the tensors. kwargs (`Dict`, *optional*): Will be passed to the `to(...)` function of the tensors. Returns: [`BatchFeature`]: The same instance after modification. """ def _recursive_to(obj, device, *args, **kwargs): # Lists can be nested, so keep digging until we hit tensors if isinstance(obj, list): return [_recursive_to(o, device, *args, **kwargs) for o in obj] # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor` elif isinstance(obj, torch.Tensor) and torch.is_floating_point(obj): # cast and send to device return obj.to(*args, **kwargs) elif isinstance(obj, torch.Tensor) and device is not None: # only send to device, don't cast return obj.to(device=device) else: return obj requires_backends(self, ["torch"]) import torch # noqa device = kwargs.get("device") # Check if the args are a device or a dtype if device is None and len(args) > 0: # device should be always the first argument arg = args[0] if is_torch_dtype(arg): # The first argument is a dtype pass elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int): device = arg else: # it's something else raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.") self.data = {k: _recursive_to(v, device, *args, **kwargs) for k, v in self.data.items()} return self # Copied from transformers.models.idefics2.image_processing_idefics2.make_list_of_images def make_list_of_images(images: ImageInput) -> List[List[np.ndarray]]: """ Convert a single image or a list of images to a list of numpy arrays. Args: images (`ImageInput`): A single image or a list of images. Returns: A list of numpy arrays. 
""" # If it's a single image, convert it to a list of lists if is_valid_image(images): images = [[images]] # If it's a list of images, it's a single batch, so convert it to a list of lists elif isinstance(images, (list, tuple)) and len(images) > 0 and is_valid_image(images[0]): images = [images] # If it's a list of batches, it's already in the right format elif ( isinstance(images, (list, tuple)) and len(images) > 0 and isinstance(images[0], (list, tuple)) and len(images[0]) > 0 and is_valid_image(images[0][0]) ): pass else: raise ValueError( "Invalid input type. Must be a single image, a list of images, or a list of batches of images." ) return images # Adapted from function in image_transforms.py to ensure any transparent pixels are converted to white. def convert_to_rgb(image: ImageInput) -> ImageInput: """ Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image as is. Args: image (Image): The image to convert. """ requires_backends(convert_to_rgb, ["vision"]) if not isinstance(image, PIL.Image.Image): return image if image.mode == "RGB": return image # First we convert to RGBA to set background to white. image = image.convert("RGBA") # Create a new image with a white background. new_image = PIL.Image.new("RGBA", image.size, "WHITE") new_image.paste(image, (0, 0), image) new_image = new_image.convert("RGB") return new_image def _num_image_tokens(image_size: Tuple[int, int], patch_size: Tuple[int, int]) -> int: """ Calculate the number of image tokens given the image size and patch size. Args: image_size (`Tuple[int, int]`): The size of the image as `(height, width)`. patch_size (`Tuple[int, int]`): The patch size as `(height, width)`. Returns: `int`: The number of image tokens. """ height, width = image_size patch_height, patch_width = patch_size if isinstance(patch_size, (tuple, list)) else (patch_size, patch_size) num_width_tokens = (width - 1) // patch_width + 1 num_height_tokens = (height - 1) // patch_height + 1 return num_height_tokens, num_width_tokens def get_resize_output_image_size( input_image: ImageInput, size: Union[int, Tuple[int, int], List[int], Tuple[int]], patch_size: Union[int, Tuple[int, int], List[int], Tuple[int]], input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> tuple: """ Find the target (height, width) dimension of the output image after resizing given the input image and the desired size. Args: input_image (`ImageInput`): The image to resize. size (`int` or `Tuple[int, int]`): Max image size an input image can be. Must be a dictionary with the key "longest_edge". patch_size (`int` or `Tuple[int, int]`): The patch_size as `(height, width)` to use for resizing the image. If patch_size is an integer, `(patch_size, patch_size)` will be used input_data_format (`ChannelDimension`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input. Returns: `tuple`: The target (height, width) dimension of the output image after resizing. 
""" max_height, max_width = size if isinstance(size, (tuple, list)) else (size, size) patch_height, patch_width = patch_size if isinstance(patch_size, (tuple, list)) else (patch_size, patch_size) height, width = get_image_size(input_image, input_data_format) ratio = max(height / max_height, width / max_width) if ratio > 1: # Orgiginal implementation uses `round` which utilises bankers rounding, which can lead to surprising results height = int(math.ceil(height / ratio)) width = int(math.ceil(width / ratio)) num_height_tokens, num_width_tokens = _num_image_tokens((height, width), (patch_height, patch_width)) return num_height_tokens * patch_height, num_width_tokens * patch_width # Hack to get tensor conversion used in BatchFeature without batching the images def _get_is_as_tensor_fns(tensor_type: Union[str, TensorType]) -> Tuple[Callable, Callable]: return BatchFeature()._get_is_as_tensor_fns(tensor_type) def convert_to_tensor(array, tensor_type: Union[str, TensorType]) -> Any: is_tensor, as_tensor = _get_is_as_tensor_fns(tensor_type) if is_tensor(array): return array return as_tensor(array) class PixtralImageProcessor(BaseImageProcessor): r""" Constructs a Pixtral image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"longest_edge": 1024}`): Size of the maximum dimension of either the height or width dimension of the image. Used to control how images are resized. If either the height or width are greater than `size["longest_edge"]` then both the height and width are rescaled by `height / ratio`, `width /ratio` where `ratio = max(height / longest_edge, width / longest_edge)` patch_size (`Dict[str, int]` *optional*, defaults to `{"height": 16, "width": 16}`): Size of the patches in the model, used to calculate the output image size. Can be overridden by `patch_size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, patch_size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"longest_edge": 1024} patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16} patch_size = get_size_dict(patch_size, default_to_square=True) self.do_resize = do_resize self.size = size self.patch_size = patch_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073] self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711] self.do_convert_rgb = do_convert_rgb self._valid_processor_keys = [ "images", "do_resize", "size", "patch_size", "resample", "do_rescale", "rescale_factor", "do_normalize", "image_mean", "image_std", "do_convert_rgb", "return_tensors", "data_format", "input_data_format", ] def resize( self, image: np.ndarray, size: Dict[str, int], patch_size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dict containing the longest possible edge of the image. patch_size (`Dict[str, int]`): Patch size used to calculate the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" if "longest_edge" in size: size = (size["longest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.") if "height" in patch_size and "width" in patch_size: patch_size = (patch_size["height"], patch_size["width"]) else: raise ValueError("patch_size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size( image, size=size, patch_size=patch_size, input_data_format=input_data_format, ) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, patch_size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Describes the maximum input dimensions to the model. patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): Patch size in the model. Used to calculate the image after resizing. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ patch_size = patch_size if patch_size is not None else self.patch_size patch_size = get_size_dict(patch_size, default_to_square=True) do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) images_list = make_list_of_images(images) if not valid_images(images_list[0]): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) if do_convert_rgb: images_list = [[convert_to_rgb(image) for image in images] for images in images_list] # All transformations expect numpy arrays. images_list = [[to_numpy_array(image) for image in images] for images in images_list] if do_rescale and is_scaled_image(images_list[0][0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images_list[0][0]) batch_images = [] batch_image_sizes = [] for sample_images in images_list: images = [] image_sizes = [] for image in sample_images: if do_resize: image = self.resize( image=image, size=size, patch_size=patch_size, resample=resample, input_data_format=input_data_format, ) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize( image=image, mean=image_mean, std=image_std, input_data_format=input_data_format ) images.append(image) image_sizes.append(get_image_size(image, input_data_format)) batch_images.append(images) batch_image_sizes.append(image_sizes) images_list = [ [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] for images in batch_images ] # Convert to tensor type outside of BatchFeature to avoid batching the images of different sizes images_list = [[convert_to_tensor(image, return_tensors) for image in images] for images in images_list] return BatchMixFeature(data={"pixel_values": images_list, "image_sizes": batch_image_sizes}, tensor_type=None) __all__ = ["PixtralImageProcessor"]
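# --- Hypothetical usage sketch (not part of the upstream file) ---
# A minimal sketch of how the processor defined above could be exercised, assuming `torch`
# is installed (needed for `return_tensors="pt"`). The dummy image shape and printed fields
# are illustrative only, not an authoritative example from the library documentation.
if __name__ == "__main__":
    import numpy as np

    sketch_processor = PixtralImageProcessor()  # defaults: size={"longest_edge": 1024}, 16x16 patches
    sketch_image = np.random.randint(0, 256, (512, 768, 3), dtype=np.uint8)  # dummy H x W x C image
    sketch_inputs = sketch_processor(images=sketch_image, return_tensors="pt")
    # `pixel_values` is a nested list (one inner list per sample) because images can differ in
    # size; `image_sizes` holds the (height, width) of each image after resizing.
    print(sketch_inputs["image_sizes"])
    print(sketch_inputs["pixel_values"][0][0].shape)  # channels-first tensor for the first image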
transformers/src/transformers/models/pixtral/image_processing_pixtral.py/0
{ "file_path": "transformers/src/transformers/models/pixtral/image_processing_pixtral.py", "repo_id": "transformers", "token_count": 10136 }
129
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pop2Piano model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class Pop2PianoConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Pop2PianoForConditionalGeneration`]. It is used to instantiate a Pop2PianoForConditionalGeneration model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pop2Piano [sweetcocoa/pop2piano](https://huggingface.co/sweetcocoa/pop2piano) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 2400): Vocabulary size of the `Pop2PianoForConditionalGeneration` model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Pop2PianoForConditionalGeneration`]. composer_vocab_size (`int`, *optional*, defaults to 21): Denotes the number of composers. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`. d_ff (`int`, *optional*, defaults to 2048): Size of the intermediate feed forward layer in each `Pop2PianoBlock`. num_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. layer_norm_epsilon (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
dense_act_fn (`string`, *optional*, defaults to `"relu"`): Type of Activation Function to be used in `Pop2PianoDenseActDense` and in `Pop2PianoDenseGatedActDense`. """ model_type = "pop2piano" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=2400, composer_vocab_size=21, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", # noqa is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, dense_act_fn="relu", **kwargs, ): self.vocab_size = vocab_size self.composer_vocab_size = composer_vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.use_cache = use_cache self.dense_act_fn = dense_act_fn self.is_gated_act = self.feed_forward_proj.split("-")[0] == "gated" self.hidden_size = self.d_model self.num_attention_heads = num_heads self.num_hidden_layers = num_layers super().__init__( pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, ) __all__ = ["Pop2PianoConfig"]
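# --- Hypothetical usage sketch (not part of the upstream file) ---
# A minimal sketch instantiating the configuration above with its defaults and inspecting a
# few derived attributes; the expected values follow directly from the defaults documented above.
if __name__ == "__main__":
    sketch_config = Pop2PianoConfig()
    print(sketch_config.num_decoder_layers)  # falls back to num_layers (6) when not given
    print(sketch_config.is_gated_act)  # True, since feed_forward_proj defaults to "gated-gelu"
    print(sketch_config.hidden_size, sketch_config.num_attention_heads)  # aliases of d_model (512) and num_heads (8)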
transformers/src/transformers/models/pop2piano/configuration_pop2piano.py/0
{ "file_path": "transformers/src/transformers/models/pop2piano/configuration_pop2piano.py", "repo_id": "transformers", "token_count": 2352 }
130
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TFRAG model implementation.""" from __future__ import annotations import copy from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...configuration_utils import PretrainedConfig from ...generation import TFLogitsProcessorList from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, keras, shape_list, unpack_inputs, ) from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "RagConfig" @dataclass class TFRetrievAugLMMarginOutput(ModelOutput): """ Base class for retriever augmented marginalized models outputs. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`tf.Tensor` (int32) of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`tf.Tensor`(int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`tf.Tensor` (int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. 
question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None doc_scores: tf.Tensor | None = None retrieved_doc_embeds: tf.Tensor | None = None retrieved_doc_ids: tf.Tensor | None = None context_input_ids: tf.Tensor | None = None context_attention_mask: tf.Tensor | None = None question_encoder_last_hidden_state: tf.Tensor | None = None question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None question_enc_attentions: Tuple[tf.Tensor, ...] | None = None generator_enc_last_hidden_state: tf.Tensor | None = None generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None generator_enc_attentions: Tuple[tf.Tensor, ...] 
| None = None generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFRetrievAugLMOutput(ModelOutput): """ Args: logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`tf.Tensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. 
generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None doc_scores: tf.Tensor | None = None retrieved_doc_embeds: tf.Tensor | None = None retrieved_doc_ids: tf.Tensor | None = None context_input_ids: tf.Tensor | None = None context_attention_mask: tf.Tensor | None = None question_encoder_last_hidden_state: tf.Tensor | None = None question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None question_enc_attentions: Tuple[tf.Tensor, ...] | None = None generator_enc_last_hidden_state: tf.Tensor | None = None generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None class TFRagPreTrainedModel(TFPreTrainedModel): r""" RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al. RAG is a retriever augmented model and encapsulate three components: a question encoder, a dataset retriever and a generator, the encoder and generator are trainable while the retriever is just an indexed dataset. """ config_class = RagConfig base_model_prefix = "rag" _keys_to_ignore_on_load_missing = [r"position_ids"] @classmethod def from_pretrained_question_encoder_generator( cls, question_encoder_pretrained_model_name_or_path: str = None, generator_pretrained_model_name_or_path: str = None, retriever: RagRetriever = None, *model_args, **kwargs, ) -> TFPreTrainedModel: r""" Instantiates an question encoder and a generator from one or two base classes of the library from pretrained model checkpoints. Params: question_encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the question encoder. 
Can be either: - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g., `google-bert/bert-base-uncased`. - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g., `dbmdz/bert-base-german-cased`. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case, `question_encoder_from_pt` should be set to `True`. generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the generator. Can be either: - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g., `google-t5/t5-small`. - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g., `facebook/bart-base`. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case, `generator_from_pt` should be set to `True`. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. retriever ([`RagRetriever`], *optional*): The retriever to use. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the question_encoder configuration, use the prefix *question_encoder_* for each configuration parameter. - To update the generator configuration, use the prefix *generator_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import RagRetriever, TFRagModel >>> # initialize a RAG from two pretrained models. >>> model = TFRagModel.from_pretrained_question_encoder_generator( ... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small" ... ) >>> # alternatively, initialize from pytorch pretrained models can also be done >>> model = TFRagModel.from_pretrained_question_encoder_generator( ... "facebook/dpr-question_encoder-single-nq-base", ... "facebook/bart-base", ... generator_from_pt=True, ... question_encoder_from_pt=True, ... ) >>> # saving model after fine-tuning >>> model.save_pretrained("./rag") >>> # load retriever >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True ... 
) >>> # load fine-tuned model with retriever >>> model = TFRagModel.from_pretrained("./rag", retriever=retriever) ```""" kwargs_question_encoder = { argument[len("question_encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("question_encoder_") } kwargs_generator = { argument[len("generator_") :]: value for argument, value in kwargs.items() if argument.startswith("generator_") } # remove question_encoder, generator kwargs from kwargs for key in kwargs_question_encoder.keys(): del kwargs["question_encoder_" + key] for key in kwargs_generator.keys(): del kwargs["generator_" + key] # Load and initialize the question_encoder and generator # The distinction between question_encoder and generator at the model level is made # by the value of the flag `is_generator` that we need to set correctly. question_encoder = kwargs_question_encoder.pop("model", None) if question_encoder is None: assert question_encoder_pretrained_model_name_or_path is not None, ( "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to" " be defined" ) from ..auto.modeling_tf_auto import TFAutoModel if "config" not in kwargs_question_encoder: from ..auto.configuration_auto import AutoConfig question_encoder_config = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path) kwargs_question_encoder["config"] = question_encoder_config question_encoder = TFAutoModel.from_pretrained( question_encoder_pretrained_model_name_or_path, name="question_encoder", load_weight_prefix=cls.load_weight_prefix, *model_args, **kwargs_question_encoder, ) generator = kwargs_generator.pop("generator", None) if generator is None: assert generator_pretrained_model_name_or_path is not None, ( "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has" " to be defined" ) from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM if "config" not in kwargs_generator: from ..auto.configuration_auto import AutoConfig generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path) kwargs_generator["config"] = generator_config generator = TFAutoModelForSeq2SeqLM.from_pretrained( generator_pretrained_model_name_or_path, name="generator", load_weight_prefix=cls.load_weight_prefix, **kwargs_generator, ) # instantiate config with corresponding kwargs config = kwargs.get("config", None) if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever) RAG_START_DOCSTRING = r""" RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator. The question encoder can be any *autoencoding* model, preferably [`TFDPRQuestionEncoder`], and the generator can be any *seq2seq* model, preferably [`TFBartForConditionalGeneration`]. The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any *autoencoding* model as the `question_encoder` and any *seq2seq* model with language model head as the `generator`. 
    It has been tested with [`TFDPRQuestionEncoder`] as the `question_encoder` and [`TFBartForConditionalGeneration`]
    as the `generator`.

    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.).

    This model is also a Tensorflow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)
    subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to
    general usage and behavior.

    The model is in a developing state: it currently supports eager-mode only, and may not be exported in SavedModel
    format.

    Args:
        config ([`RagConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
        question_encoder ([`TFPreTrainedModel`]):
            An encoder model compatible with the faiss index encapsulated by the `retriever`.
        generator ([`TFPreTrainedModel`]):
            A seq2seq model used as the generator in the RAG architecture.
        retriever ([`RagRetriever`]):
            A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
"""

RAG_FORWARD_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
            which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to
            obtain the indices.
        attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        encoder_outputs (`tuple(tuple(tf.Tensor))`, *optional*):
            Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
            *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
            sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
            generator's encoder.

            Used by the ([`TFRagModel`]) model during decoding.
        decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Provide for generation tasks. `None` by default, construct as per instructions for the generator model
            you're using with your RAG instance.
        decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        past_key_values (`tuple(tuple(tf.Tensor))`):
            Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and
            `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are
            used in the ([`RagTokenForGeneration`]) model during decoding.
        doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
            Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
            `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
            has to be provided to the forward pass.
            `doc_scores` can be computed via `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see
            examples for more information.
        context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever. If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to
            the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
        context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever. If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided
            to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
        use_cache (`bool`, *optional*, defaults to `True`):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        output_retrieved (`bool`, *optional*):
            Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask`. See returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`TFRetrievAugLMOutput`] instead of a plain tuple.
        n_docs (`int`, *optional*, defaults to `config.n_docs`):
            Number of documents to retrieve and/or number of documents for which to generate an answer.
"""


@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)
class TFRagModel(TFRagPreTrainedModel):
    load_weight_prefix = "tf_rag_model_1"

    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        question_encoder: Optional[TFPreTrainedModel] = None,
        generator: Optional[TFPreTrainedModel] = None,
        retriever: Optional[RagRetriever] = None,
        load_weight_prefix: Optional[str] = None,
        **kwargs,
    ):
        assert config is not None or (
            question_encoder is not None and generator is not None
        ), "Either a configuration or a question_encoder and a generator has to be provided."
if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) else: assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}" super().__init__(config, **kwargs) if question_encoder is None: from ..auto.modeling_tf_auto import TFAutoModel question_encoder = TFAutoModel.from_config(config.question_encoder, name="question_encoder") if generator is None: from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM load_weight_prefix = load_weight_prefix if load_weight_prefix is not None else self.load_weight_prefix generator = TFAutoModelForSeq2SeqLM.from_config( config.generator, name="generator", load_weight_prefix=load_weight_prefix + "/generator" ) self.retriever = retriever if self.retriever is not None: assert isinstance( retriever, RagRetriever ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`" self.retriever = retriever self.question_encoder = question_encoder self.generator = generator def set_retriever(self, retriever: RagRetriever): self.retriever = retriever @unpack_inputs @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None, doc_scores: np.ndarray | tf.Tensor | None = None, context_input_ids: np.ndarray | tf.Tensor | None = None, context_attention_mask: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, output_retrieved: bool | None = None, n_docs: int | None = None, return_dict: bool | None = None, training: bool = False, **kwargs, ) -> TFRetrievAugLMOutput: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, TFRagModel >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagModel.from_pretrained("facebook/rag-token-base", retriever=retriever, from_pt=True) >>> input_dict = tokenizer.prepare_seq2seq_batch( ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ... 
) >>> input_ids = input_dict["input_ids"] >>> outputs = model(input_ids) ```""" assert ( "decoder_cached_states" not in kwargs ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py # aliasing to minimize code changing n_docs = n_docs if n_docs is not None else self.config.n_docs # whether retriever has to be used has_to_retrieve = ( self.retriever is not None and (context_input_ids is None or context_attention_mask is None or doc_scores is None) and encoder_outputs is None ) # encoder_outputs are pre-computed during RAG-token generation if encoder_outputs is None: if has_to_retrieve: question_enc_outputs = self.question_encoder( input_ids, attention_mask=attention_mask, return_dict=True, training=training ) # see https://github.com/huggingface/transformers/blob/main/src/transformers/models/dpr/modeling_tf_dpr.py#L91 question_encoder_last_hidden_state = question_enc_outputs[ 0 ] # hidden states of question encoder => pooler_output retriever_outputs = self.retriever( input_ids, question_encoder_last_hidden_state.numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", ) context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = ( retriever_outputs["context_input_ids"], retriever_outputs["context_attention_mask"], retriever_outputs["retrieved_doc_embeds"], retriever_outputs["doc_ids"], ) context_input_ids = tf.cast(context_input_ids, tf.int32) context_attention_mask = tf.cast(context_attention_mask, tf.int32) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) retrieved_doc_ids = tf.cast(retrieved_doc_ids, tf.int32) # compute doc_scores doc_scores = tf.squeeze( tf.matmul( tf.expand_dims(question_encoder_last_hidden_state, axis=1), retrieved_doc_embeds, transpose_b=True, ), axis=1, ) else: assert context_input_ids is not None, ( "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can" " set a retriever using the `set_retriever(...)` function." ) assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." ) assert ( doc_scores is not None ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function." assert (doc_scores.shape[1] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." 
) # Decoder input without context documents if decoder_input_ids is not None: decoder_input_ids = tf.repeat(decoder_input_ids, n_docs, axis=0) if decoder_attention_mask is not None: decoder_attention_mask = tf.repeat(decoder_attention_mask, n_docs, axis=0) gen_outputs = self.generator( context_input_ids, attention_mask=context_attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, return_dict=True, training=training, ) if not has_to_retrieve: question_encoder_last_hidden_state = None question_enc_hidden_states = None question_enc_attentions = None retrieved_doc_embeds = None retrieved_doc_ids = None else: question_enc_hidden_states = question_enc_outputs.hidden_states question_enc_attentions = question_enc_outputs.attentions if not has_to_retrieve or not output_retrieved: # don't output retrieved docs context_input_ids = (None,) context_attention_mask = None retrieved_doc_embeds = None retrieved_doc_ids = None return TFRetrievAugLMOutput( logits=gen_outputs.logits, doc_scores=doc_scores, past_key_values=gen_outputs.past_key_values, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, retrieved_doc_embeds=retrieved_doc_embeds, retrieved_doc_ids=retrieved_doc_ids, question_encoder_last_hidden_state=question_encoder_last_hidden_state, question_enc_hidden_states=question_enc_hidden_states, question_enc_attentions=question_enc_attentions, generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state, generator_enc_hidden_states=gen_outputs.encoder_hidden_states, generator_enc_attentions=gen_outputs.encoder_attentions, generator_dec_hidden_states=gen_outputs.decoder_hidden_states, generator_dec_attentions=gen_outputs.decoder_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True with tf.name_scope(self.generator.name): self.generator.build(None) with tf.name_scope(self.question_encoder.name): self.question_encoder.build(None) @add_start_docstrings_to_model_forward( """ A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass. """, RAG_START_DOCSTRING, ) class TFRagTokenForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss): load_weight_prefix = "tf_rag_token_for_generation_1/rag" def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[TFPreTrainedModel] = None, generator: Optional[TFPreTrainedModel] = None, retriever: Optional[RagRetriever] = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an encoder and a generator has to be provided." 
if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = TFRagModel( config=config, question_encoder=question_encoder, generator=generator, retriever=retriever, load_weight_prefix=self.load_weight_prefix, name="rag", ) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever # Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_bart.py def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, doc_scores=None, n_docs=None, **kwargs, ): if past_key_values is not None: # if past is defined use only last decoder_input_ids decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, "encoder_outputs": encoder_outputs, "doc_scores": doc_scores, "context_attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "past_key_values": past_key_values, "use_cache": use_cache, "do_marginalize": True, "n_docs": n_docs, } @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @staticmethod def _gather_beams(nested, beam_indices, batch_axis=0): """ RAG-specific `_gather_beams`: gathers the beam slices indexed by beam_indices into new beam array. If the nested tensor has a shape mismatch with the beam indices, then it means it is the cache. In that case, isolates and takes care of the extra dimension for ndocs. """ def gather_fn(tensor): is_rag_cache = tensor.shape[0] != beam_indices.shape[0] if is_rag_cache: n_docs = tensor.shape[0] // beam_indices.shape[0] batch_size = beam_indices.shape[0] # reshapes into (batch size, num beams, n_docs, ...), the cache format expected by RAG tensor = tf.reshape(tensor, (batch_size, -1, n_docs, *tensor.shape[2:])) gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1) if is_rag_cache: # reshapes back into the shape expected by beam search gathered_tensor = tf.reshape(gathered_tensor, (batch_size * n_docs, -1, *gathered_tensor.shape[3:])) return gathered_tensor return tf.nest.map_structure(gather_fn, nested) def marginalize(self, seq_logits, doc_scores, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # RAG-token marginalization seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1) seq_logprobs = tf.reshape(seq_logprobs, [seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]]) doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # twice log_prob_sum = seq_logprobs + doc_logprobs return tf.reduce_logsumexp(log_prob_sum, axis=1) @unpack_inputs @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None, doc_scores: np.ndarray | tf.Tensor | None = None, context_input_ids: np.ndarray | tf.Tensor | None = None, 
context_attention_mask: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, output_retrieved: bool | None = None, n_docs: int | None = None, do_marginalize: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, reduce_loss: bool | None = None, return_dict: bool | None = None, training: bool = False, **kwargs, # needs kwargs for generation ) -> TFRetrievAugLMMarginOutput: r""" do_marginalize (`bool`, *optional*): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss according to Rag-Token model formulation See https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about Rag-Token formulation. Indices should be in `[0, ..., config.vocab_size - 1]`. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum` operation. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Legacy dictionary, which is required so that model can use *generate()* function. Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, RagRetriever, TFRagTokenForGeneration >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True) >>> input_dict = tokenizer.prepare_seq2seq_batch( ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ... ) >>> outputs = model(input_dict, output_retrieved=True) >>> # or use retriever separately >>> # 1. Encode >>> input_ids = input_dict["input_ids"] >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") >>> doc_scores = tf.squeeze( ... tf.matmul( ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True ... ), ... axis=1, ... ) >>> # 3. Forward to generator >>> outputs = model( ... inputs=None, ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=input_dict["labels"], ... ) >>> # or directly generate >>> generated = model.generate( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... 
) >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) ```""" assert ( "decoder_cached_states" not in kwargs ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py do_marginalize = do_marginalize if do_marginalize else self.config.do_marginalize reduce_loss = reduce_loss if reduce_loss else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, training=training, ) loss = None logits = outputs.logits if labels is not None: assert decoder_input_ids is not None loss = self.get_nll( outputs.logits, outputs.doc_scores, labels, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, n_docs=n_docs, ) if do_marginalize: logits = self.marginalize(logits, outputs.doc_scores, n_docs) return TFRetrievAugLMMarginOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, doc_scores=outputs.doc_scores, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, ) def generate( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, context_input_ids=None, context_attention_mask=None, doc_scores=None, n_docs=None, generation_config=None, logits_processor=TFLogitsProcessorList(), **kwargs, ): """ Implements TFRAG token decoding. Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model has is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. 
context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embedding (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores` has to be provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`]. n_docs (`int`, *optional*, defaults to `config.n_docs`): Number of documents to retrieve and/or number of documents for which to generate an answer. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which has the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`TFLogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and a model's config. If a logits processor is passed that was already created with the arguments or a model's config, an error is thrown. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. Return: `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
""" # Handle `generation_config` and kwargs that might update it if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs # set default parameters n_docs = n_docs if n_docs is not None else self.config.n_docs # retrieve docs if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] out = self.retriever( input_ids, question_hidden_states.numpy().astype(np.float32), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) context_input_ids = tf.cast(context_input_ids, tf.int32) context_attention_mask = tf.cast(context_attention_mask, tf.int32) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) # compute doc_scores doc_scores = tf.matmul( tf.expand_dims(question_hidden_states, axis=1), retrieved_doc_embeds, transpose_b=True ) doc_scores = tf.squeeze(doc_scores, axis=1) assert (context_input_ids.shape[0] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." ) batch_size = context_input_ids.shape[0] // n_docs encoder = self.rag.generator.get_encoder() encoder_outputs = encoder( input_ids=context_input_ids, attention_mask=context_attention_mask, output_attentions=generation_config.output_attentions, output_hidden_states=generation_config.output_hidden_states, return_dict=True, ) decoder_input_ids = tf.fill( (batch_size * generation_config.num_beams, 1), tf.cast(generation_config.decoder_start_token_id, tf.int32), ) last_hidden_state = encoder_outputs["last_hidden_state"] def extend_enc_output(tensor, num_beams=None): """ Broadcast tensor with `num_beams` replica, with correct order Input: tensor of shape (batch_size*n_docs , d) Output: tensor of shape (batch_size*num_beams*n_docs , d) """ # expand batch_size & num_beam dimensions d_shape_list = tensor.shape[1:] # split n_docs dimensions new_shape = (batch_size, 1, n_docs) + d_shape_list tensor = tf.reshape(tensor, new_shape) # repeat same last hidden states over `num_beams` dimension new_shape = (batch_size, num_beams, n_docs) + d_shape_list tensor = tf.broadcast_to(tensor, new_shape) # merge `batch_size`, `num_beams`, `num_docs` dims again new_shape = (batch_size * num_beams * n_docs,) + d_shape_list return tf.reshape(tensor, new_shape) # correctly extend last_hidden_state and attention mask context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams) encoder_outputs["last_hidden_state"] = extend_enc_output( last_hidden_state, num_beams=generation_config.num_beams ) doc_scores = tf.repeat(doc_scores, generation_config.num_beams, axis=0) # define start_len & additional parameters model_kwargs["doc_scores"] = doc_scores model_kwargs["encoder_outputs"] = encoder_outputs model_kwargs["attention_mask"] = context_attention_mask model_kwargs["n_docs"] = n_docs pre_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=tf.shape(decoder_input_ids)[-1], logits_processor=logits_processor, ) if generation_config.num_beams == 1: return self.greedy_search( input_ids=decoder_input_ids, max_length=generation_config.max_length, 
pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, logits_processor=pre_processor, output_attentions=generation_config.output_attentions, output_hidden_states=generation_config.output_hidden_states, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) elif generation_config.num_beams > 1: if generation_config.num_beams < generation_config.num_return_sequences: raise ValueError( "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >=" f" num_return_sequences, got {generation_config.num_beams} and" f" {generation_config.num_return_sequences} (respectively)" ) def unflatten_beam_dim(tensor): """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" shape = shape_list(tensor) return tf.reshape(tensor, [-1, generation_config.num_beams] + shape[1:]) decoder_input_ids = unflatten_beam_dim(decoder_input_ids) model_kwargs["attention_mask"] = unflatten_beam_dim(model_kwargs["attention_mask"]) model_kwargs["encoder_outputs"]["last_hidden_state"] = unflatten_beam_dim( model_kwargs["encoder_outputs"]["last_hidden_state"] ) return self.beam_search( input_ids=decoder_input_ids, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, logits_processor=pre_processor, output_attentions=generation_config.output_attentions, output_hidden_states=generation_config.output_hidden_states, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) else: raise ValueError( f"`num_beams` has to be a strictly positive integer (>= 1), but is {generation_config.num_beams}" ) def get_input_embeddings(self): return self.rag.generator.get_input_embeddings() def get_output_embeddings(self): return self.rag.generator.get_output_embeddings() # Adapted from tf_t5's & tf_bart's _shift_right def shift_tokens_right(self, input_ids, start_token_id=None): """Shift input ids one token to the right, and pad with start_token_id""" if start_token_id is None: start_token_id = self.generator.config.decoder_start_token_id assert start_token_id is not None, ( "self.generator.config.decoder_start_token_id has to be defined. In Rag we commonly use Bart as" " generator, see Bart docs for more information" ) pad_token_id = self.generator.config.pad_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." 
start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.cast(start_token_id, input_ids.dtype)) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), tf.cast(pad_token_id, input_ids.dtype)), shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, shifted_input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids # nll stands for 'negative log likelihood' def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # shift tokens left (from original Pytorch's version) target = tf.concat( [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))], axis=1, ) rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs) loss = self.hf_compute_loss(target, rag_logprobs, from_logits=True, reduce_loss=reduce_loss) return loss # Adopted modeling_tf_bart + add smooth_loss to match with pytorch version def hf_compute_loss(self, labels, y_pred, smooth_epsilon=0.0, from_logits=True, reduce_loss=False): """CrossEntropyLoss that ignores pad tokens""" # Matt: As written, this loss is not XLA-compatible, but it's doing some very weird things # and I don't feel comfortable converting it. loss_fn = keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=keras.losses.Reduction.SUM, ) if from_logits is False: # convert to logits eps = 1e-9 y_pred = tf.clip_by_value(y_pred, clip_value_min=eps, clip_value_max=1 - eps) y_pred = tf.math.log(y_pred) logits = y_pred melted_labels = tf.reshape(labels, (-1,)) active_loss = tf.not_equal(melted_labels, self.config.generator.pad_token_id) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, logits.shape[2])), active_loss) labels = tf.boolean_mask(melted_labels, active_loss) nll_loss = loss_fn(labels, reduced_logits) smooth_loss = -tf.reduce_sum(reduced_logits, axis=-1) smooth_loss = tf.reduce_sum(smooth_loss) # sum and squeeze like torch eps_i = smooth_epsilon / reduced_logits.shape[-1] loss = (1.0 - smooth_epsilon) * nll_loss + eps_i * smooth_loss return loss def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "rag", None) is not None: with tf.name_scope(self.rag.name): self.rag.build(None) @add_start_docstrings_to_model_forward( """ A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass. """, RAG_START_DOCSTRING, ) class TFRagSequenceForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss): load_weight_prefix = "tf_rag_sequence_for_generation_1/rag" def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[TFPreTrainedModel] = None, generator: Optional[TFPreTrainedModel] = None, retriever: Optional[RagRetriever] = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an encoder and a generator has to be provided." 
if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = TFRagModel( config=config, question_encoder=question_encoder, generator=generator, retriever=retriever, load_weight_prefix=self.load_weight_prefix, name="rag", ) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @unpack_inputs @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, doc_scores: np.ndarray | tf.Tensor | None = None, context_input_ids: np.ndarray | tf.Tensor | None = None, context_attention_mask: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_retrieved: Optional[bool] = None, n_docs: Optional[int] = None, exclude_bos_score: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, reduce_loss: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, **kwargs, # needs kwargs for generation ) -> Union[Tuple[tf.Tensor], TFRetrievAugLMMarginOutput]: r""" exclude_bos_score (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing the loss. labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss according to Rag-Sequence model formulation See https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about Rag-Sequence formulation. Indices should be in `[0, ..., config.vocab_size - 1]`. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum` operation. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Legacy dictionary, which is required so that model can use *generate()* function. Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, TFRagSequenceForGeneration >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagSequenceForGeneration.from_pretrained( ... "facebook/rag-sequence-nq", retriever=retriever, from_pt=True ... ) >>> input_dict = tokenizer.prepare_seq2seq_batch( ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ... ) >>> outputs = model(input_dict, output_retrieved=True) >>> # or use retriever separately >>> # 1. Encode >>> input_ids = input_dict["input_ids"] >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. 
Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") >>> doc_scores = tf.squeeze( ... tf.matmul( ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True ... ), ... axis=1, ... ) >>> # 3. Forward to generator >>> outputs = model( ... inputs=None, ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=input_dict["labels"], ... ) >>> # or directly generate >>> generated = model.generate( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... ) >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) ```""" assert ( "decoder_cached_states" not in kwargs ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py exclude_bos_score = exclude_bos_score if exclude_bos_score else self.config.exclude_bos_score reduce_loss = reduce_loss if reduce_loss else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, training=training, ) loss = None if labels is not None: loss = self.get_nll( outputs.logits, outputs.doc_scores, labels, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, n_docs=n_docs, ) return TFRetrievAugLMMarginOutput( loss=loss, logits=outputs.logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, ) def get_nll( self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None ): # shift tokens left target = tf.concat( [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))], axis=1, ) # bos_token_id is None for T5 bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id n_docs = n_docs if n_docs is not None else self.config.n_docs equal_bos_token_id_all = tf.reduce_all(tf.equal(target[:, 0], bos_token_id)) use_bos = bos_token_id is not None and equal_bos_token_id_all def _mask_pads(ll, smooth_obj): pad_mask = tf.equal(target, tf.cast(self.config.generator.pad_token_id, target.dtype)) if tf.reduce_any(pad_mask): ll = tf.where(pad_mask, 0.0, ll) smooth_obj = 
tf.where(pad_mask, 0.0, smooth_obj) return tf.squeeze(ll, axis=-1), tf.squeeze(smooth_obj, axis=-1) # seq_logits.shape = (batch*n_docs, tgt_len , vocabs) seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1) seq_logprobs = tf.reshape( seq_logprobs, (seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]) ) # (batch_size, n_docs, tgt_len, vocabs) doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # done twice to get 4-D # RAG-sequence marginalization first_token_scores = seq_logprobs[:, :, :1, :] second_token_scores = seq_logprobs[:, :, 1:2, :] remainder = seq_logprobs[:, :, 2:, :] rag_logprobs = tf.concat([first_token_scores, second_token_scores + doc_logprobs, remainder], axis=2) # calculate loss target = tf.expand_dims(target, axis=1) # n_docs dimension target = tf.expand_dims(target, axis=-1) # logits dimension target = tf.repeat(target, n_docs, axis=1) assert len(target.shape) == len(rag_logprobs.shape) # last-axis gathering only - use 2D-reshape-trick for Torch's style nD gathering def torch_gather(param, id_tensor): # 2d-gather torch equivalent: https://stackoverflow.com/questions/52129909/tensorflow-equivalent-of-torch-gather def gather2d(target, id_tensor): idx = tf.stack([tf.range(tf.shape(id_tensor)[0], dtype=id_tensor.dtype), id_tensor[:, 0]], axis=-1) result = tf.gather_nd(target, idx) return tf.expand_dims(result, axis=-1) target = tf.reshape(param, (-1, param.shape[-1])) # reshape 2D target_shape = id_tensor.shape id_tensor = tf.reshape(id_tensor, (-1, 1)) # also 2D-index result = gather2d(target, id_tensor) return tf.reshape(result, target_shape) ll = torch_gather(rag_logprobs, id_tensor=target) smooth_obj = tf.reduce_sum(rag_logprobs, axis=-1, keepdims=True) # total sum of all (normalised) logits ll, smooth_obj = _mask_pads(ll, smooth_obj) # sum over tokens, exclude bos while scoring if exclude_bos_score and use_bos: ll = tf.reduce_sum(ll[:, :, 1:], axis=2) else: ll = tf.reduce_sum(ll, axis=2) smooth_obj = tf.reduce_sum(smooth_obj, axis=2) ll = tf.math.reduce_logsumexp(ll, axis=1) # logsumexp over docs smooth_obj = tf.math.reduce_logsumexp(smooth_obj, axis=1) nll_loss = -ll smooth_loss = -smooth_obj if reduce_loss: nll_loss = tf.reduce_sum(nll_loss) smooth_loss = tf.reduce_sum(smooth_loss) eps_i = epsilon / rag_logprobs.shape[-1] loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss def generate( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, context_input_ids=None, context_attention_mask=None, doc_scores=None, do_deduplication=None, # defaults to True num_return_sequences=None, # defaults to 1 num_beams=None, # defaults to 1 n_docs=None, **model_kwargs, ): """ Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`]` documentation for more information on how to set other generate input parameters Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and `context_attention_mask` have to be provided to the forward pass. They are returned by [`~RagRetriever.__call__`]. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embedding (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`]. do_deduplication (`bool`, *optional*): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with a distributed backend. num_return_sequences (`int`, *optional*, defaults to 1): The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the `generator`'s [`~generation.GenerationMixin.generate`] function, where we set `num_return_sequences` to `num_beams`. num_beams (`int`, *optional*, defaults to 1): Number of beams for beam search. 1 means no beam search. n_docs (`int`, *optional*, defaults to `config.n_docs`): Number of documents to retrieve and/or number of documents for which to generate an answer. kwargs (`Dict[str, Any]`, *optional*): Additional kwargs will be passed to [`~generation.GenerationMixin.generate`]. Return: `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
""" n_docs = n_docs if n_docs is not None else self.config.n_docs do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication num_doc_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) num_beams = num_beams if num_beams is not None else self.config.num_beams assert ( input_ids is not None or context_input_ids is not None ), " At least one of input_ids or context_input_ids must be given" if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] context_input_ids = self.retriever( input_ids, question_hidden_states.numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", )["context_input_ids"] hypos = [] model_kwargs["num_beams"] = num_beams model_kwargs["num_return_sequences"] = num_beams # put here so that not confused with num_doc_return_sequences model_kwargs["attention_mask"] = None batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs for index in range(batch_size): # first, generate beams from documents: generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len) output_sequences = self.generator.generate( generator_input_ids, **model_kwargs, ) # n_docs * n_beam, tgt_len if do_deduplication: # do_deduplication -- for TF, work on Eager mode only! output_sequences = tf.stack(list({str(k.numpy().tolist()): k for k in output_sequences}.values())) num_candidates = output_sequences.shape[ 0 ] # after deduplication, this number can be less than n_docs*n_beam # then, run model forwards to get nll scores: if input_ids is not None: new_input_ids = tf.tile(input_ids[index : index + 1], (num_candidates, 1)) outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True) else: # input_ids is None, need context_input_ids/mask and doc_scores assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." 
) individual_input_ids = tf.tile( generator_input_ids, (num_candidates, 1) ) # (num_candidates*n_docs, max_len) individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs] individual_attention_mask = tf.tile(individual_attention_mask, (num_candidates, 1)) individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs] individual_doc_scores = tf.tile(individual_doc_scores, (num_candidates, 1)) # [num_candidates, n_docs] outputs = self( input_ids=None, context_input_ids=individual_input_ids, context_attention_mask=individual_attention_mask, doc_scores=individual_doc_scores, labels=output_sequences, exclude_bos_score=True, ) top_cand_inds = tf.math.top_k((-outputs["loss"]), k=num_doc_return_sequences)[1] # add hypothesis hypos.append(tf.gather(output_sequences, top_cand_inds)) return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id) @staticmethod def _cat_and_pad(tensors, pad_token_id): # used by generate(): tensors is a (batched) list of (candidates, len); len is varied across batch # Initialize padded tensor with shape ( all_candidates , max_candidate_length ), # where all_candidates counted from all inputs new_shape = sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors]) output = tf.fill(new_shape, pad_token_id) # Normal tensor doesn't support slice assignment, so we need tf.Variable output = tf.Variable(output) # Assign, and then convert back to tensor ind = 0 for t in tensors: output[ind : ind + t.shape[0], : t.shape[1]].assign(t) ind += t.shape[0] output = tf.convert_to_tensor(output) return tf.cast(output, tensors[0][0][0].dtype) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "rag", None) is not None: with tf.name_scope(self.rag.name): self.rag.build(None) __all__ = ["TFRagModel", "TFRagPreTrainedModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration"]
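# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): a minimal sketch of
# the document-score marginalization that `get_nll` performs above, written
# with plain TensorFlow ops. The batch size, number of documents, target
# length, and vocabulary size are made-up toy values; only the pattern --
# log_softmax over doc_scores, broadcast-add to per-document token
# log-probabilities, then reduce_logsumexp over the document axis -- mirrors
# the real code.
import tensorflow as tf

n_docs, tgt_len, vocab_size = 2, 3, 5
seq_logits = tf.random.normal((1 * n_docs, tgt_len, vocab_size))  # generator logits, one row per document
doc_scores = tf.random.normal((1, n_docs))                        # retrieval scores, one column per document

seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1)
seq_logprobs = tf.reshape(seq_logprobs, (1, n_docs, tgt_len, vocab_size))
doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1)[:, :, None, None]

# joint log-probability of (document, token), marginalized over the document axis
marginalized = tf.reduce_logsumexp(seq_logprobs + doc_logprobs, axis=1)
print(marginalized.shape)  # (1, 3, 5)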
transformers/src/transformers/models/rag/modeling_tf_rag.py/0
{ "file_path": "transformers/src/transformers/models/rag/modeling_tf_rag.py", "repo_id": "transformers", "token_count": 38121 }
131
# coding=utf-8 # Copyright 2022 Microsoft Research, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorFlow ResNet model.""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACT2FN from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFImageClassifierOutputWithNoAttention, ) from ...modeling_tf_utils import ( TFPreTrainedModel, TFSequenceClassificationLoss, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_resnet import ResNetConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "ResNetConfig" # Base docstring _CHECKPOINT_FOR_DOC = "microsoft/resnet-50" _EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50" _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" class TFResNetConvLayer(keras.layers.Layer): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu", **kwargs, ) -> None: super().__init__(**kwargs) self.pad_value = kernel_size // 2 self.conv = keras.layers.Conv2D( out_channels, kernel_size=kernel_size, strides=stride, padding="valid", use_bias=False, name="convolution" ) # Use same default momentum and epsilon as PyTorch equivalent self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") self.activation = ACT2FN[activation] if activation is not None else keras.layers.Activation("linear") self.in_channels = in_channels self.out_channels = out_channels def convolution(self, hidden_state: tf.Tensor) -> tf.Tensor: # Pad to match that done in the PyTorch Conv2D model height_pad = width_pad = (self.pad_value, self.pad_value) hidden_state = tf.pad(hidden_state, [(0, 0), height_pad, width_pad, (0, 0)]) hidden_state = self.conv(hidden_state) return hidden_state def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_state = self.convolution(hidden_state) hidden_state = self.normalization(hidden_state, training=training) hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv", None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, None, self.in_channels]) if getattr(self, "normalization", None) is not None: with tf.name_scope(self.normalization.name): self.normalization.build([None, None, None, self.out_channels]) class TFResNetEmbeddings(keras.layers.Layer): """ ResNet Embeddings (stem) composed of a single aggressive convolution. 
""" def __init__(self, config: ResNetConfig, **kwargs) -> None: super().__init__(**kwargs) self.embedder = TFResNetConvLayer( config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act, name="embedder", ) self.pooler = keras.layers.MaxPool2D(pool_size=3, strides=2, padding="valid", name="pooler") self.num_channels = config.num_channels def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor: _, _, _, num_channels = shape_list(pixel_values) if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) hidden_state = pixel_values hidden_state = self.embedder(hidden_state) hidden_state = tf.pad(hidden_state, [[0, 0], [1, 1], [1, 1], [0, 0]]) hidden_state = self.pooler(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embedder", None) is not None: with tf.name_scope(self.embedder.name): self.embedder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFResNetShortCut(keras.layers.Layer): """ ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to downsample the input using `stride=2`. """ def __init__(self, in_channels: int, out_channels: int, stride: int = 2, **kwargs) -> None: super().__init__(**kwargs) self.convolution = keras.layers.Conv2D( out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution" ) # Use same default momentum and epsilon as PyTorch equivalent self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") self.in_channels = in_channels self.out_channels = out_channels def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_state = x hidden_state = self.convolution(hidden_state) hidden_state = self.normalization(hidden_state, training=training) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convolution", None) is not None: with tf.name_scope(self.convolution.name): self.convolution.build([None, None, None, self.in_channels]) if getattr(self, "normalization", None) is not None: with tf.name_scope(self.normalization.name): self.normalization.build([None, None, None, self.out_channels]) class TFResNetBasicLayer(keras.layers.Layer): """ A classic ResNet's residual layer composed by two `3x3` convolutions. 
""" def __init__( self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", **kwargs ) -> None: super().__init__(**kwargs) should_apply_shortcut = in_channels != out_channels or stride != 1 self.conv1 = TFResNetConvLayer(in_channels, out_channels, stride=stride, name="layer.0") self.conv2 = TFResNetConvLayer(out_channels, out_channels, activation=None, name="layer.1") self.shortcut = ( TFResNetShortCut(in_channels, out_channels, stride=stride, name="shortcut") if should_apply_shortcut else keras.layers.Activation("linear", name="shortcut") ) self.activation = ACT2FN[activation] def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor: residual = hidden_state hidden_state = self.conv1(hidden_state, training=training) hidden_state = self.conv2(hidden_state, training=training) residual = self.shortcut(residual, training=training) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv1", None) is not None: with tf.name_scope(self.conv1.name): self.conv1.build(None) if getattr(self, "conv2", None) is not None: with tf.name_scope(self.conv2.name): self.conv2.build(None) if getattr(self, "shortcut", None) is not None: with tf.name_scope(self.shortcut.name): self.shortcut.build(None) class TFResNetBottleNeckLayer(keras.layers.Layer): """ A classic ResNet's bottleneck layer composed by three `3x3` convolutions. The first `1x1` convolution reduces the input by a factor of `reduction` in order to make the second `3x3` convolution faster. The last `1x1` convolution remaps the reduced features to `out_channels`. """ def __init__( self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4, **kwargs, ) -> None: super().__init__(**kwargs) should_apply_shortcut = in_channels != out_channels or stride != 1 reduces_channels = out_channels // reduction self.conv0 = TFResNetConvLayer(in_channels, reduces_channels, kernel_size=1, name="layer.0") self.conv1 = TFResNetConvLayer(reduces_channels, reduces_channels, stride=stride, name="layer.1") self.conv2 = TFResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None, name="layer.2") self.shortcut = ( TFResNetShortCut(in_channels, out_channels, stride=stride, name="shortcut") if should_apply_shortcut else keras.layers.Activation("linear", name="shortcut") ) self.activation = ACT2FN[activation] def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor: residual = hidden_state hidden_state = self.conv0(hidden_state, training=training) hidden_state = self.conv1(hidden_state, training=training) hidden_state = self.conv2(hidden_state, training=training) residual = self.shortcut(residual, training=training) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv0", None) is not None: with tf.name_scope(self.conv0.name): self.conv0.build(None) if getattr(self, "conv1", None) is not None: with tf.name_scope(self.conv1.name): self.conv1.build(None) if getattr(self, "conv2", None) is not None: with tf.name_scope(self.conv2.name): self.conv2.build(None) if getattr(self, "shortcut", None) is not None: with tf.name_scope(self.shortcut.name): self.shortcut.build(None) class TFResNetStage(keras.layers.Layer): """ A ResNet stage composed of stacked layers. 
""" def __init__( self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs ) -> None: super().__init__(**kwargs) layer = TFResNetBottleNeckLayer if config.layer_type == "bottleneck" else TFResNetBasicLayer layers = [layer(in_channels, out_channels, stride=stride, activation=config.hidden_act, name="layers.0")] layers += [ layer(out_channels, out_channels, activation=config.hidden_act, name=f"layers.{i + 1}") for i in range(depth - 1) ] self.stage_layers = layers def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor: for layer in self.stage_layers: hidden_state = layer(hidden_state, training=training) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "stage_layers", None) is not None: for layer in self.stage_layers: with tf.name_scope(layer.name): layer.build(None) class TFResNetEncoder(keras.layers.Layer): def __init__(self, config: ResNetConfig, **kwargs) -> None: super().__init__(**kwargs) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages = [ TFResNetStage( config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0", ) ] for i, (in_channels, out_channels, depth) in enumerate( zip(config.hidden_sizes, config.hidden_sizes[1:], config.depths[1:]) ): self.stages.append(TFResNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i + 1}")) def call( self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True, training: bool = False, ) -> TFBaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state,) hidden_state = stage_module(hidden_state, training=training) if output_hidden_states: hidden_states = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "stages", None) is not None: for layer in self.stages: with tf.name_scope(layer.name): layer.build(None) class TFResNetPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ResNetConfig base_model_prefix = "resnet" main_input_name = "pixel_values" @property def input_signature(self): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)} RESNET_START_DOCSTRING = r""" This model is a TensorFlow [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular TensorFlow Module and refer to the TensorFlow documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ RESNET_INPUTS_DOCSTRING = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. 
Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @keras_serializable class TFResNetMainLayer(keras.layers.Layer): config_class = ResNetConfig def __init__(self, config: ResNetConfig, **kwargs) -> None: super().__init__(**kwargs) self.config = config self.embedder = TFResNetEmbeddings(config, name="embedder") self.encoder = TFResNetEncoder(config, name="encoder") self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True) @unpack_inputs def call( self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TF 2.0 image layers can't use NCHW format when running on CPU. # We transpose to NHWC format and then transpose back after the full forward pass. # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels) pixel_values = tf.transpose(pixel_values, perm=[0, 2, 3, 1]) embedding_output = self.embedder(pixel_values, training=training) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training ) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) # Transpose all the outputs to the NCHW format # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width) last_hidden_state = tf.transpose(last_hidden_state, (0, 3, 1, 2)) pooled_output = tf.transpose(pooled_output, (0, 3, 1, 2)) hidden_states = () for hidden_state in encoder_outputs[1:]: hidden_states = hidden_states + tuple(tf.transpose(h, (0, 3, 1, 2)) for h in hidden_state) if not return_dict: return (last_hidden_state, pooled_output) + hidden_states hidden_states = hidden_states if output_hidden_states else None return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embedder", None) is not None: with tf.name_scope(self.embedder.name): self.embedder.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) @add_start_docstrings( "The bare ResNet model outputting raw features without any specific head on top.", RESNET_START_DOCSTRING, ) class TFResNetModel(TFResNetPreTrainedModel): def __init__(self, config: ResNetConfig, **kwargs) -> None: super().__init__(config, **kwargs) self.resnet = TFResNetMainLayer(config=config, name="resnet") @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) @unpack_inputs def call( self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: 
bool = False, ) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict resnet_outputs = self.resnet( pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return resnet_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "resnet", None) is not None: with tf.name_scope(self.resnet.name): self.resnet.build(None) @add_start_docstrings( """ ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """, RESNET_START_DOCSTRING, ) class TFResNetForImageClassification(TFResNetPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: ResNetConfig, **kwargs) -> None: super().__init__(config, **kwargs) self.num_labels = config.num_labels self.resnet = TFResNetMainLayer(config, name="resnet") # classification head self.classifier_layer = ( keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else keras.layers.Activation("linear", name="classifier.1") ) self.config = config def classifier(self, x: tf.Tensor) -> tf.Tensor: x = keras.layers.Flatten()(x) logits = self.classifier_layer(x) return logits @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) @unpack_inputs def call( self, pixel_values: tf.Tensor = None, labels: tf.Tensor = None, output_hidden_states: bool = None, return_dict: bool = None, training: bool = False, ) -> Union[Tuple[tf.Tensor], TFImageClassifierOutputWithNoAttention]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.resnet( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training ) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "resnet", None) is not None: with tf.name_scope(self.resnet.name): self.resnet.build(None) if getattr(self, "classifier_layer", None) is not None: with tf.name_scope(self.classifier_layer.name): self.classifier_layer.build([None, None, self.config.hidden_sizes[-1]]) __all__ = ["TFResNetForImageClassification", "TFResNetModel", "TFResNetPreTrainedModel"]
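# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): a minimal usage
# sketch for the classification model defined above. It builds the model from
# a fresh, randomly initialized `ResNetConfig` rather than pretrained weights,
# so the logits are meaningless; the point is only the call signature and the
# NCHW input shape declared in `TFResNetPreTrainedModel.input_signature`.
import tensorflow as tf

from transformers import ResNetConfig, TFResNetForImageClassification

config = ResNetConfig(num_labels=3)
model = TFResNetForImageClassification(config)

pixel_values = tf.random.uniform((1, config.num_channels, 224, 224))  # (batch, channels, height, width)
outputs = model(pixel_values)
print(outputs.logits.shape)  # (1, 3)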
transformers/src/transformers/models/resnet/modeling_tf_resnet.py/0
{ "file_path": "transformers/src/transformers/models/resnet/modeling_tf_resnet.py", "repo_id": "transformers", "token_count": 10026 }
132
# coding=utf-8 # Copyright 2022 WeChatAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """RoCBert model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class RoCBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RoCBertModel`]. It is used to instantiate a RoCBert model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RoCBert [weiweishi/roc-bert-base-zh](https://huggingface.co/weiweishi/roc-bert-base-zh) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the RoCBert model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RoCBertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`RoCBertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. 
position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. enable_pronunciation (`bool`, *optional*, defaults to `True`): Whether or not the model use pronunciation embed when training. enable_shape (`bool`, *optional*, defaults to `True`): Whether or not the model use shape embed when training. pronunciation_embed_dim (`int`, *optional*, defaults to 768): Dimension of the pronunciation_embed. pronunciation_vocab_size (`int`, *optional*, defaults to 910): Pronunciation Vocabulary size of the RoCBert model. Defines the number of different tokens that can be represented by the `input_pronunciation_ids` passed when calling [`RoCBertModel`]. shape_embed_dim (`int`, *optional*, defaults to 512): Dimension of the shape_embed. shape_vocab_size (`int`, *optional*, defaults to 24858): Shape Vocabulary size of the RoCBert model. Defines the number of different tokens that can be represented by the `input_shape_ids` passed when calling [`RoCBertModel`]. concat_input (`bool`, *optional*, defaults to `True`): Defines the way of merging the shape_embed, pronunciation_embed and word_embed, if the value is true, output_embed = torch.cat((word_embed, shape_embed, pronunciation_embed), -1), else output_embed = (word_embed + shape_embed + pronunciation_embed) / 3 Example: ```python >>> from transformers import RoCBertModel, RoCBertConfig >>> # Initializing a RoCBert weiweishi/roc-bert-base-zh style configuration >>> configuration = RoCBertConfig() >>> # Initializing a model from the weiweishi/roc-bert-base-zh style configuration >>> model = RoCBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "roc_bert" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.enable_pronunciation = enable_pronunciation self.enable_shape = enable_shape self.pronunciation_embed_dim = pronunciation_embed_dim 
self.pronunciation_vocab_size = pronunciation_vocab_size self.shape_embed_dim = shape_embed_dim self.shape_vocab_size = shape_vocab_size self.concat_input = concat_input self.position_embedding_type = position_embedding_type self.classifier_dropout = classifier_dropout super().__init__(pad_token_id=pad_token_id, **kwargs) __all__ = ["RoCBertConfig"]
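# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): the `concat_input`
# flag documented above only decides how the word, shape, and pronunciation
# embedding streams are merged. The snippet reproduces that merge rule with
# random placeholder tensors; in the real model the three streams come from
# separate embedding tables and may be projected to a common size first,
# which is omitted here for brevity.
import torch

batch_size, seq_len, hidden_size = 2, 4, 768
word_embed = torch.randn(batch_size, seq_len, hidden_size)
shape_embed = torch.randn(batch_size, seq_len, hidden_size)          # assumed already projected to hidden_size
pronunciation_embed = torch.randn(batch_size, seq_len, hidden_size)  # assumed already projected to hidden_size

concat_input = True
if concat_input:
    merged = torch.cat((word_embed, shape_embed, pronunciation_embed), dim=-1)  # (..., 3 * hidden_size)
else:
    merged = (word_embed + shape_embed + pronunciation_embed) / 3               # (..., hidden_size)
print(merged.shape)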
transformers/src/transformers/models/roc_bert/configuration_roc_bert.py/0
{ "file_path": "transformers/src/transformers/models/roc_bert/configuration_roc_bert.py", "repo_id": "transformers", "token_count": 3160 }
133
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for RT-DETR.""" import pathlib from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_processing_utils import BaseImageProcessor, get_size_dict from ...image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, pad, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, AnnotationType, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_annotations, validate_preprocess_arguments, ) from ...utils import ( filter_out_non_signature_kwargs, is_flax_available, is_jax_tensor, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, logging, requires_backends, ) from ...utils.generic import TensorType if is_torch_available(): import torch logger = logging.get_logger(__name__) # pylint: disable=invalid-name SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION,) # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ height, width = image_size raw_size = None if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: raw_size = max_size * min_original_size / max_original_size size = int(round(raw_size)) if (height <= width and height == size) or (width <= height and width == size): oh, ow = height, width elif width < height: ow = size if max_size is not None and raw_size is not None: oh = int(raw_size * height / width) else: oh = int(size * height / width) else: oh = size if max_size is not None and raw_size is not None: ow = int(raw_size * width / height) else: ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. If the desired output size is a tuple or list, the output image size is returned as is. 
    If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the
    input image size.

    Args:
        input_image (`np.ndarray`):
            The image to resize.
        size (`int` or `Tuple[int, int]` or `List[int]`):
            The desired output size.
        max_size (`int`, *optional*):
            The maximum allowed output size.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred from the input image.
    """
    image_size = get_image_size(input_image, input_data_format)
    if isinstance(size, (list, tuple)):
        return size

    return get_size_with_aspect_ratio(image_size, size, max_size)


# Copied from transformers.models.detr.image_processing_detr.get_image_size_for_max_height_width
def get_image_size_for_max_height_width(
    input_image: np.ndarray,
    max_height: int,
    max_width: int,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> Tuple[int, int]:
    """
    Computes the output image size given the input image and the maximum allowed height and width. Keeps the aspect
    ratio. Important: even if image_height < max_height and image_width < max_width, the image is still resized so
    that at least one of its edges equals max_height or max_width.

    For example:
        - input_size: (100, 200), max_height: 50, max_width: 50 -> output_size: (25, 50)
        - input_size: (100, 200), max_height: 200, max_width: 500 -> output_size: (200, 400)

    Args:
        input_image (`np.ndarray`):
            The image to resize.
        max_height (`int`):
            The maximum allowed height.
        max_width (`int`):
            The maximum allowed width.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred from the input image.
    """
    image_size = get_image_size(input_image, input_data_format)
    height, width = image_size
    height_scale = max_height / height
    width_scale = max_width / width
    min_scale = min(height_scale, width_scale)
    new_height = int(height * min_scale)
    new_width = int(width * min_scale)
    return new_height, new_width


# Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
def get_numpy_to_framework_fn(arr) -> Callable:
    """
    Returns a function that converts a numpy array to the framework of the input array.

    Args:
        arr (`np.ndarray`): The array to convert.
    """
    if isinstance(arr, np.ndarray):
        return np.array
    if is_tf_available() and is_tf_tensor(arr):
        import tensorflow as tf

        return tf.convert_to_tensor
    if is_torch_available() and is_torch_tensor(arr):
        import torch

        return torch.tensor
    if is_flax_available() and is_jax_tensor(arr):
        import jax.numpy as jnp

        return jnp.array
    raise ValueError(f"Cannot convert arrays of type {type(arr)}")


# Copied from transformers.models.detr.image_processing_detr.safe_squeeze
def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
    """
    Squeezes an array, but only if the axis specified has dim 1.
""" if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width( images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> List[int]: """ Get the maximum height and width across all images in a batch. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if input_data_format == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_data_format == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_data_format}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask( image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask def prepare_coco_detection_annotation( image, target, return_segmentation_masks: bool = False, input_data_format: Optional[Union[ChannelDimension, str]] = None, ): """ Convert the target in COCO format into the format expected by RTDETR. """ image_height, image_width = get_image_size(image, channel_dim=input_data_format) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. 
    annotations = target["annotations"]
    annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]

    classes = [obj["category_id"] for obj in annotations]
    classes = np.asarray(classes, dtype=np.int64)

    # for conversion to coco api
    area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
    iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)

    boxes = [obj["bbox"] for obj in annotations]
    # guard against no boxes via resizing
    boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
    boxes[:, 2:] += boxes[:, :2]
    boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
    boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)

    keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])

    new_target = {}
    new_target["image_id"] = image_id
    new_target["class_labels"] = classes[keep]
    new_target["boxes"] = boxes[keep]
    new_target["area"] = area[keep]
    new_target["iscrowd"] = iscrowd[keep]
    new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)

    if annotations and "keypoints" in annotations[0]:
        keypoints = [obj["keypoints"] for obj in annotations]
        # Converting the filtered keypoints list to a numpy array
        keypoints = np.asarray(keypoints, dtype=np.float32)
        # Apply the keep mask here to filter the relevant annotations
        keypoints = keypoints[keep]
        num_keypoints = keypoints.shape[0]
        keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
        new_target["keypoints"] = keypoints

    return new_target


# Copied from transformers.models.detr.image_processing_detr.resize_annotation
def resize_annotation(
    annotation: Dict[str, Any],
    orig_size: Tuple[int, int],
    target_size: Tuple[int, int],
    threshold: float = 0.5,
    resample: PILImageResampling = PILImageResampling.NEAREST,
):
    """
    Resizes an annotation to a target size.

    Args:
        annotation (`Dict[str, Any]`):
            The annotation dictionary.
        orig_size (`Tuple[int, int]`):
            The original size of the input image.
        target_size (`Tuple[int, int]`):
            The target size of the image, as returned by the preprocessing `resize` step.
        threshold (`float`, *optional*, defaults to 0.5):
            The threshold used to binarize the segmentation masks.
        resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
            The resampling filter to use when resizing the masks.
    """
    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
    ratio_height, ratio_width = ratios

    new_annotation = {}
    new_annotation["size"] = target_size

    for key, value in annotation.items():
        if key == "boxes":
            boxes = value
            scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
            new_annotation["boxes"] = scaled_boxes
        elif key == "area":
            area = value
            scaled_area = area * (ratio_width * ratio_height)
            new_annotation["area"] = scaled_area
        elif key == "masks":
            masks = value[:, None]
            masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
            masks = masks.astype(np.float32)
            masks = masks[:, 0] > threshold
            new_annotation["masks"] = masks
        elif key == "size":
            new_annotation["size"] = target_size
        else:
            new_annotation[key] = value

    return new_annotation


class RTDetrImageProcessor(BaseImageProcessor):
    r"""
    Constructs an RT-DETR image processor.

    Args:
        format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`):
            Data format of the annotations. Only `"coco_detection"` is currently supported.
        do_resize (`bool`, *optional*, defaults to `True`):
            Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
            overridden by the `do_resize` parameter in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"height": 640, "width": 640}`):
            Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter
            in the `preprocess` method. Available options are:
                - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
                  Do NOT keep the aspect ratio.
                - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
                  the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
                  less or equal to `longest_edge`.
                - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting
                  the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
                  `max_width`.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            Resampling filter to use if resizing the image.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `False`):
            Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
            `preprocess` method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
            Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
            channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
            Standard deviation values to use when normalizing the image. Can be a single value or a list of values,
            one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_annotations (`bool`, *optional*, defaults to `True`):
            Controls whether to convert the annotations to the format expected by the DETR model. Converts the
            bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`. Can be
            overridden by the `do_convert_annotations` parameter in the `preprocess` method.
        do_pad (`bool`, *optional*, defaults to `False`):
            Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
            method. If `True`, padding will be applied to the bottom and right of the image with zeros.
            If `pad_size` is provided, the image will be padded to the specified dimensions.
            Otherwise, the image will be padded to the maximum height and width of the batch.
        pad_size (`Dict[str, int]`, *optional*):
            The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
            provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
            height and width in the batch.
""" model_input_names = ["pixel_values", "pixel_mask"] def __init__( self, format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = False, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, do_convert_annotations: bool = True, do_pad: bool = False, pad_size: Optional[Dict[str, int]] = None, **kwargs, ) -> None: size = size if size is not None else {"height": 640, "width": 640} size = get_size_dict(size, default_to_square=False) if do_convert_annotations is None: do_convert_annotations = do_normalize super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.do_convert_annotations = do_convert_annotations self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad self.pad_size = pad_size def prepare_annotation( self, image: np.ndarray, target: Dict, format: Optional[AnnotationFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Dict: """ Prepare an annotation for feeding into RTDETR model. """ format = format if format is not None else self.format if format == AnnotationFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation( image, target, return_segmentation_masks, input_data_format=input_data_format ) else: raise ValueError(f"Format {format} is not supported.") return target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the image's `(height, width)` dimensions after resizing. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. 
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        if "max_size" in kwargs:
            logger.warning_once(
                "The `max_size` parameter is deprecated and will be removed in v4.26. "
                "Please specify in `size['longest_edge']` instead.",
            )
            max_size = kwargs.pop("max_size")
        else:
            max_size = None
        size = get_size_dict(size, max_size=max_size, default_to_square=False)
        if "shortest_edge" in size and "longest_edge" in size:
            new_size = get_resize_output_image_size(
                image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
            )
        elif "max_height" in size and "max_width" in size:
            new_size = get_image_size_for_max_height_width(
                image, size["max_height"], size["max_width"], input_data_format=input_data_format
            )
        elif "height" in size and "width" in size:
            new_size = (size["height"], size["width"])
        else:
            raise ValueError(
                "Size must contain 'height' and 'width' keys, 'shortest_edge' and 'longest_edge' keys, or"
                f" 'max_height' and 'max_width' keys. Got {size.keys()}."
            )
        image = resize(
            image,
            size=new_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )
        return image

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
    def resize_annotation(
        self,
        annotation,
        orig_size,
        size,
        resample: PILImageResampling = PILImageResampling.NEAREST,
    ) -> Dict:
        """
        Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
        to this number.
        """
        return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
    def rescale(
        self,
        image: np.ndarray,
        rescale_factor: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """
        Rescale the image by the given factor. image = image * rescale_factor.

        Args:
            image (`np.ndarray`):
                Image to rescale.
            rescale_factor (`float`):
                The value to use for rescaling.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the input image. If unset, is inferred from the input image. Can be
                one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
        """
        return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
    def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
        """
        Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
        `[center_x, center_y, width, height]` format and from absolute to relative pixel values.
""" return normalize_annotation(annotation, image_size=image_size) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image def _update_annotation_for_padded_image( self, annotation: Dict, input_image_size: Tuple[int, int], output_image_size: Tuple[int, int], padding, update_bboxes, ) -> Dict: """ Update the annotation for a padded image. """ new_annotation = {} new_annotation["size"] = output_image_size for key, value in annotation.items(): if key == "masks": masks = value masks = pad( masks, padding, mode=PaddingMode.CONSTANT, constant_values=0, input_data_format=ChannelDimension.FIRST, ) masks = safe_squeeze(masks, 1) new_annotation["masks"] = masks elif key == "boxes" and update_bboxes: boxes = value boxes *= np.asarray( [ input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0], input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0], ] ) new_annotation["boxes"] = boxes elif key == "size": new_annotation["size"] = output_image_size else: new_annotation[key] = value return new_annotation # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], annotation: Optional[Dict[str, Any]] = None, constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, update_bboxes: bool = True, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) if annotation is not None: annotation = self._update_annotation_for_padded_image( annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes ) return padded_image, annotation # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, update_bboxes: bool = True, pad_size: Optional[Dict[str, int]] = None, ) -> BatchFeature: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: images (List[`np.ndarray`]): Images to pad. annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): Annotations to transform according to the padding that is applied to the images. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. 
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. update_bboxes (`bool`, *optional*, defaults to `True`): Whether to update the bounding boxes in the annotations to match the padded images. If the bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)` format, the bounding boxes will not be updated. pad_size (`Dict[str, int]`, *optional*): The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height and width in the batch. """ pad_size = pad_size if pad_size is not None else self.pad_size if pad_size is not None: padded_size = (pad_size["height"], pad_size["width"]) else: padded_size = get_max_height_width(images, input_data_format=input_data_format) annotation_list = annotations if annotations is not None else [None] * len(images) padded_images = [] padded_annotations = [] for image, annotation in zip(images, annotation_list): padded_image, padded_annotation = self._pad_image( image, padded_size, annotation, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, update_bboxes=update_bboxes, ) padded_images.append(padded_image) padded_annotations.append(padded_annotation) data = {"pixel_values": padded_images} if return_pixel_mask: masks = [ make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images ] data["pixel_mask"] = masks encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs["labels"] = [ BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations ] return encoded_inputs @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample=None, # PILImageResampling do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, do_convert_annotations: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, format: Optional[Union[str, AnnotationFormat]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, pad_size: Optional[Dict[str, int]] = None, ) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. 
If passing in images with pixel values between 0 and 1, set `do_rescale=False`. annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): List of annotations associated with the image or batch of images. If annotation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to self.size): Size of the image's `(height, width)` dimensions after resizing. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations): Whether to convert the annotations to the format expected by the model. Converts the bounding boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)` and in relative coordinates. image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. If `True`, padding will be applied to the bottom and right of the image with zeros. If `pad_size` is provided, the image will be padded to the specified dimensions. Otherwise, the image will be padded to the maximum height and width of the batch. format (`str` or `AnnotationFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. If `None`, will return the list of images. 
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            pad_size (`Dict[str, int]`, *optional*):
                The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
                provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height
                and width in the batch.
        """
        do_resize = self.do_resize if do_resize is None else do_resize
        size = self.size if size is None else size
        size = get_size_dict(size=size, default_to_square=True)
        resample = self.resample if resample is None else resample
        do_rescale = self.do_rescale if do_rescale is None else do_rescale
        rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
        do_normalize = self.do_normalize if do_normalize is None else do_normalize
        image_mean = self.image_mean if image_mean is None else image_mean
        image_std = self.image_std if image_std is None else image_std
        do_convert_annotations = (
            self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
        )
        do_pad = self.do_pad if do_pad is None else do_pad
        pad_size = self.pad_size if pad_size is None else pad_size
        format = self.format if format is None else format

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        if annotations is not None and isinstance(annotations, dict):
            annotations = [annotations]

        if annotations is not None and len(images) != len(annotations):
            raise ValueError(
                f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
            )

        format = AnnotationFormat(format)
        if annotations is not None:
            validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)

        # All transformations expect numpy arrays
        images = [to_numpy_array(image) for image in images]

        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
        if annotations is not None:
            prepared_images = []
            prepared_annotations = []
            for image, target in zip(images, annotations):
                target = self.prepare_annotation(
                    image,
                    target,
                    format,
                    return_segmentation_masks=return_segmentation_masks,
                    masks_path=masks_path,
                    input_data_format=input_data_format,
                )
                prepared_images.append(image)
                prepared_annotations.append(target)
            images = prepared_images
            annotations = prepared_annotations
            del prepared_images, prepared_annotations

        # transformations
        if do_resize:
            if annotations is not None:
                resized_images, resized_annotations = [], []
                for image, target in zip(images, annotations):
                    orig_size = get_image_size(image, input_data_format)
                    resized_image = self.resize(
                        image, size=size, resample=resample, input_data_format=input_data_format
                    )
                    resized_annotation = self.resize_annotation(
                        target, orig_size, get_image_size(resized_image, input_data_format)
                    )
                    resized_images.append(resized_image)
                    resized_annotations.append(resized_annotation)
                images = resized_images
                annotations = resized_annotations
                del resized_images, resized_annotations
            else:
                images = [
                    self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
                    for image in images
                ]

        if do_rescale:
            images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]

        if do_normalize:
            images = [
                self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
            ]

        if do_convert_annotations and annotations is not None:
            annotations = [
                self.normalize_annotation(annotation, get_image_size(image, input_data_format))
                for annotation, image in zip(annotations, images)
            ]

        if do_pad:
            # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
            encoded_inputs = self.pad(
                images,
                annotations=annotations,
                return_pixel_mask=True,
                data_format=data_format,
                input_data_format=input_data_format,
                update_bboxes=do_convert_annotations,
                return_tensors=return_tensors,
                pad_size=pad_size,
            )
        else:
            images = [
                to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
                for image in images
            ]
            encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
            if annotations is not None:
                encoded_inputs["labels"] = [
                    BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
                ]

        return encoded_inputs

    def post_process_object_detection(
        self,
        outputs,
        threshold: float = 0.5,
        target_sizes: Union[TensorType, List[Tuple]] = None,
        use_focal_loss: bool = True,
    ):
        """
        Converts the raw output of [`RTDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
        bottom_right_x, bottom_right_y) format. Only supports PyTorch.

        Args:
            outputs ([`RTDetrObjectDetectionOutput`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.5):
                Score threshold to keep object detection predictions.
            target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                `(height, width)` of each image in the batch. If unset, predictions will not be resized.
            use_focal_loss (`bool`, *optional*, defaults to `True`):
                Variable informing if the focal loss was used to predict the outputs. If `True`, a sigmoid is applied
                to compute the scores of each detection, otherwise, a softmax function is used.
        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
            in the batch as predicted by the model.
        """
        requires_backends(self, ["torch"])

        out_logits, out_bbox = outputs.logits, outputs.pred_boxes

        # convert from relative cxcywh to absolute xyxy
        boxes = center_to_corners_format(out_bbox)
        if target_sizes is not None:
            if len(out_logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if isinstance(target_sizes, List):
                img_h, img_w = torch.as_tensor(target_sizes).unbind(1)
            else:
                img_h, img_w = target_sizes.unbind(1)
            scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
            boxes = boxes * scale_fct[:, None, :]

        num_top_queries = out_logits.shape[1]
        num_classes = out_logits.shape[2]

        if use_focal_loss:
            scores = torch.nn.functional.sigmoid(out_logits)
            scores, index = torch.topk(scores.flatten(1), num_top_queries, dim=-1)
            labels = index % num_classes
            index = index // num_classes
            boxes = boxes.gather(dim=1, index=index.unsqueeze(-1).repeat(1, 1, boxes.shape[-1]))
        else:
            scores = torch.nn.functional.softmax(out_logits, dim=-1)[:, :, :-1]
            scores, labels = scores.max(dim=-1)
            if scores.shape[1] > num_top_queries:
                scores, index = torch.topk(scores, num_top_queries, dim=-1)
                labels = torch.gather(labels, dim=1, index=index)
                boxes = torch.gather(boxes, dim=1, index=index.unsqueeze(-1).tile(1, 1, boxes.shape[-1]))

        results = []
        for score, label, box in zip(scores, labels, boxes):
            results.append(
                {
                    "scores": score[score > threshold],
                    "labels": label[score > threshold],
                    "boxes": box[score > threshold],
                }
            )

        return results


__all__ = ["RTDetrImageProcessor"]
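# --- Usage sketch (illustrative addition, not part of the upstream module) ---
# A minimal, hedged example of wiring this processor to an RT-DETR detection
# model: preprocess an image, run the model, then map the raw outputs back to
# absolute (x1, y1, x2, y2) boxes. The checkpoint name and threshold below are
# assumptions for illustration; any compatible RT-DETR checkpoint works alike.
if __name__ == "__main__":
    import torch
    from PIL import Image

    from transformers import RTDetrForObjectDetection

    processor = RTDetrImageProcessor(size={"height": 640, "width": 640})
    model = RTDetrForObjectDetection.from_pretrained("PekingU/rtdetr_r50vd")  # assumed checkpoint name
    image = Image.new("RGB", (640, 480))  # stand-in for a real image

    # preprocess() resizes to 640x640, rescales pixel values to [0, 1], and batches.
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert relative (center_x, center_y, width, height) predictions back to
    # absolute corner coordinates in the original image's resolution.
    results = processor.post_process_object_detection(
        outputs, threshold=0.5, target_sizes=[(image.height, image.width)]
    )
    print(results[0]["boxes"].shape)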
transformers/src/transformers/models/rt_detr/image_processing_rt_detr.py/0
{ "file_path": "transformers/src/transformers/models/rt_detr/image_processing_rt_detr.py", "repo_id": "transformers", "token_count": 22077 }
134
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extractor class for Speech2Text
"""

from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, is_speech_available, logging


if is_speech_available():
    import torch
    import torchaudio.compliance.kaldi as ta_kaldi


logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Speech2Text feature extractor.

    This feature extractor inherits from [`SequenceFeatureExtractor`] which contains most of the main methods. Users
    should refer to this superclass for more information regarding those methods.

    This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy
    otherwise, and applies utterance-level cepstral mean and variance normalization to the extracted features.

    Args:
        feature_size (`int`, *optional*, defaults to 80):
            The feature dimension of the extracted features.
        sampling_rate (`int`, *optional*, defaults to 16000):
            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
        num_mel_bins (`int`, *optional*, defaults to 80):
            Number of Mel-frequency bins.
        padding_value (`float`, *optional*, defaults to 0.0):
            The value that is used to fill the padding vectors.
        do_ceptral_normalize (`bool`, *optional*, defaults to `True`):
            Whether or not to apply utterance-level cepstral mean and variance normalization to extracted features.
        normalize_means (`bool`, *optional*, defaults to `True`):
            Whether or not to zero-mean normalize the extracted features.
        normalize_vars (`bool`, *optional*, defaults to `True`):
            Whether or not to unit-variance normalize the extracted features.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

        if not is_speech_available():
            mel_filters = mel_filter_bank(
                num_frequency_bins=256,
                num_mel_filters=self.num_mel_bins,
                min_frequency=20,
                max_frequency=sampling_rate // 2,
                sampling_rate=sampling_rate,
                norm=None,
                mel_scale="kaldi",
                triangularize_in_mel_space=True,
            )

            self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0)))
            self.window = window_function(400, "povey", periodic=False)

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """
        Get mel-filter bank features using TorchAudio.
        Note that TorchAudio requires 16-bit signed integers as inputs and hence the waveform should not be normalized
        before feature extraction.
        """
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        if is_speech_available():
            waveform = torch.from_numpy(waveform).unsqueeze(0)
            features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
            features = features.numpy()
        else:
            waveform = np.squeeze(waveform)
            features = spectrogram(
                waveform,
                self.window,
                frame_length=400,
                hop_length=160,
                fft_length=512,
                power=2.0,
                center=False,
                preemphasis=0.97,
                mel_filters=self.mel_filters,
                log_mel="log",
                mel_floor=1.192092955078125e-07,
                remove_dc_offset=True,
            ).T
        return features

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s).

        Args:
            raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
                stereo, i.e. single float per timestep.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
                  different lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            truncation (`bool`):
                Activates truncation to cut input sequences longer than *max_length* to *max_length*.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific feature_extractor's default.

                [What are attention masks?](../glossary#attention-mask)

                <Tip>

                For Speech2TextTransformer models, `attention_mask` should always be passed for batched inference, to
                avoid subtle bugs.

                </Tip>

            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
padded_inputs["input_features"] = self.normalize( padded_inputs["input_features"], attention_mask=attention_mask ) if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs __all__ = ["Speech2TextFeatureExtractor"]
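# --- Usage sketch (illustrative addition, not part of the upstream module) ---
# A hedged example of extracting normalized filter-bank features. The synthetic
# 440 Hz tone below is a stand-in for real 16 kHz audio; a real pipeline would
# load a waveform with e.g. torchaudio or librosa at the same sampling rate.
if __name__ == "__main__":
    extractor = Speech2TextFeatureExtractor(feature_size=80, sampling_rate=16000, num_mel_bins=80)
    waveform = np.sin(2 * np.pi * 440.0 * np.arange(16000) / 16000).astype(np.float32)

    # Always pass `sampling_rate` so the extractor can verify it matches its own.
    features = extractor(waveform, sampling_rate=16000, padding=True, return_tensors="np")
    print(features["input_features"].shape)  # (batch, num_frames, num_mel_bins)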
transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py/0
{ "file_path": "transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py", "repo_id": "transformers", "token_count": 5626 }
135
# coding=utf-8 # Copyright 2021 Tel AViv University, AllenAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Splinter model.""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, ModelOutput, QuestionAnsweringModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_splinter import SplinterConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "tau/splinter-base" _CONFIG_FOR_DOC = "SplinterConfig" class SplinterEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: Optional[int] = 0, ) -> Tuple: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings 
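# --- Shape sketch (illustrative addition, not part of the upstream module) ---
# SplinterEmbeddings sums word, token-type, and (absolute) position embeddings,
# then applies LayerNorm and dropout. A tiny shape check under an assumed small
# config (`SplinterConfig` is imported at the top of this module):
#
#     config = SplinterConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
#                             num_attention_heads=4, intermediate_size=64)
#     embeddings = SplinterEmbeddings(config)
#     input_ids = torch.randint(0, config.vocab_size, (2, 7))
#     print(embeddings(input_ids=input_ids).shape)  # torch.Size([2, 7, 32])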
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Splinter class SplinterSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in SplinterModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Splinter class SplinterSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states SPLINTER_SELF_ATTENTION_CLASSES = { "eager": SplinterSelfAttention, } # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Splinter,BERT->SPLINTER class SplinterAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = SPLINTER_SELF_ATTENTION_CLASSES[config._attn_implementation]( config, position_embedding_type=position_embedding_type ) self.output = SplinterSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Splinter class SplinterIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> 
torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Splinter class SplinterOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Splinter class SplinterLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = SplinterAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = SplinterAttention(config, position_embedding_type="absolute") self.intermediate = SplinterIntermediate(config) self.output = SplinterOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( 
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Splinter class SplinterEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([SplinterLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class SplinterPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = SplinterConfig base_model_prefix = "splinter" supports_gradient_checkpointing = True # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) SPLINTER_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`SplinterConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SPLINTER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `{0}`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Splinter Model transformer outputting raw hidden-states without any specific head on top.", SPLINTER_START_DOCSTRING, ) class SplinterModel(SplinterPreTrainedModel): """ The model is an encoder (with only self-attention) following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = SplinterEmbeddings(config) self.encoder = SplinterEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class 
SplinterFullyConnectedLayer(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_act="gelu"):
        super().__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim

        self.dense = nn.Linear(self.input_dim, self.output_dim)
        self.act_fn = ACT2FN[hidden_act]
        self.LayerNorm = nn.LayerNorm(self.output_dim)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(inputs)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class QuestionAwareSpanSelectionHead(nn.Module):
    """
    Implementation of the Question-Aware Span Selection (QASS) head described in Splinter's paper, "Few-Shot
    Question Answering by Pretraining Span Selection" (Ram et al., 2021).
    """

    def __init__(self, config):
        super().__init__()

        self.query_start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
        self.query_end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
        self.start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
        self.end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)

        self.start_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
        self.end_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False)

    def forward(self, inputs, positions):
        _, _, dim = inputs.size()
        index = positions.unsqueeze(-1).repeat(1, 1, dim)  # [batch_size, num_positions, dim]
        gathered_reps = torch.gather(inputs, dim=1, index=index)  # [batch_size, num_positions, dim]

        query_start_reps = self.query_start_transform(gathered_reps)  # [batch_size, num_positions, dim]
        query_end_reps = self.query_end_transform(gathered_reps)  # [batch_size, num_positions, dim]
        start_reps = self.start_transform(inputs)  # [batch_size, seq_length, dim]
        end_reps = self.end_transform(inputs)  # [batch_size, seq_length, dim]

        hidden_states = self.start_classifier(query_start_reps)  # [batch_size, num_positions, dim]
        start_reps = start_reps.permute(0, 2, 1)  # [batch_size, dim, seq_length]
        start_logits = torch.matmul(hidden_states, start_reps)

        hidden_states = self.end_classifier(query_end_reps)
        end_reps = end_reps.permute(0, 2, 1)
        end_logits = torch.matmul(hidden_states, end_reps)

        return start_logits, end_logits


@add_start_docstrings(
    """
    Splinter Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""", SPLINTER_START_DOCSTRING, ) class SplinterForQuestionAnswering(SplinterPreTrainedModel): def __init__(self, config): super().__init__(config) self.splinter = SplinterModel(config) self.splinter_qass = QuestionAwareSpanSelectionHead(config) self.question_token_id = config.question_token_id # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, question_positions: Optional[torch.LongTensor] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size, num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size, sequence_length)`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict question_positions_were_none = False if question_positions is None: if input_ids is not None: question_position_for_each_example = torch.argmax( (torch.eq(input_ids, self.question_token_id)).int(), dim=-1 ) else: question_position_for_each_example = torch.zeros( inputs_embeds.size(0), dtype=torch.long, layout=inputs_embeds.layout, device=inputs_embeds.device ) question_positions = question_position_for_each_example.unsqueeze(-1) question_positions_were_none = True outputs = self.splinter( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] start_logits, end_logits = self.splinter_qass(sequence_output, question_positions) if question_positions_were_none: start_logits, end_logits = start_logits.squeeze(1), end_logits.squeeze(1) if attention_mask is not None: start_logits = start_logits + (1 - attention_mask) * torch.finfo(start_logits.dtype).min end_logits = end_logits + (1 - attention_mask) * torch.finfo(end_logits.dtype).min total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @dataclass class SplinterForPreTrainingOutput(ModelOutput): """ Class for outputs of Splinter as a span selection model. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when start and end positions are provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @add_start_docstrings( """ Splinter Model for the recurring span selection task as done during the pretraining. The difference to the QA task is that we do not have a question, but multiple question tokens that replace the occurrences of recurring spans instead. """, SPLINTER_START_DOCSTRING, ) class SplinterForPreTraining(SplinterPreTrainedModel): def __init__(self, config): super().__init__(config) self.splinter = SplinterModel(config) self.splinter_qass = QuestionAwareSpanSelectionHead(config) self.question_token_id = config.question_token_id # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( SPLINTER_INPUTS_DOCSTRING.format("batch_size, num_questions, sequence_length") ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, question_positions: Optional[torch.LongTensor] = None, ) -> Union[Tuple, SplinterForPreTrainingOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size, num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size, sequence_length)`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if question_positions is None and start_positions is not None and end_positions is not None: raise TypeError("question_positions must be specified in order to calculate the loss") elif question_positions is None and input_ids is None: raise TypeError("question_positions must be specified when input_embeds is used") elif question_positions is None: question_positions = self._prepare_question_positions(input_ids) outputs = self.splinter( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] batch_size, sequence_length, dim = sequence_output.size() # [batch_size, num_questions, sequence_length] start_logits, end_logits = self.splinter_qass(sequence_output, question_positions) num_questions = question_positions.size(1) if attention_mask is not None: attention_mask_for_each_question = attention_mask.unsqueeze(1).expand( batch_size, num_questions, sequence_length ) start_logits = start_logits + (1 - attention_mask_for_each_question) * torch.finfo(start_logits.dtype).min end_logits = end_logits + (1 - attention_mask_for_each_question) * torch.finfo(end_logits.dtype).min total_loss = None # [batch_size, num_questions, sequence_length] if start_positions is not None and end_positions is not None: # sometimes the start/end positions are outside our model inputs, we ignore these terms start_positions.clamp_(0, max(0, sequence_length - 1)) end_positions.clamp_(0, max(0, sequence_length - 1)) # Ignore zero positions in the loss. Splinter never predicts zero # during pretraining and zero is used for padding question # tokens as well as for start and end positions of padded # question tokens. loss_fct = CrossEntropyLoss(ignore_index=self.config.pad_token_id) start_loss = loss_fct( start_logits.view(batch_size * num_questions, sequence_length), start_positions.view(batch_size * num_questions), ) end_loss = loss_fct( end_logits.view(batch_size * num_questions, sequence_length), end_positions.view(batch_size * num_questions), ) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return SplinterForPreTrainingOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _prepare_question_positions(self, input_ids: torch.Tensor) -> torch.Tensor: rows, flat_positions = torch.where(input_ids == self.config.question_token_id) num_questions = torch.bincount(rows) positions = torch.full( (input_ids.size(0), num_questions.max()), self.config.pad_token_id, dtype=torch.long, device=input_ids.device, ) cols = torch.cat([torch.arange(n) for n in num_questions]) positions[rows, cols] = flat_positions return positions __all__ = [ "SplinterForQuestionAnswering", "SplinterForPreTraining", "SplinterLayer", "SplinterModel", "SplinterPreTrainedModel", ]
transformers/src/transformers/models/splinter/modeling_splinter.py/0
{ "file_path": "transformers/src/transformers/models/splinter/modeling_splinter.py", "repo_id": "transformers", "token_count": 22264 }
136
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, List from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING if TYPE_CHECKING: from ..superpoint import SuperPointConfig logger = logging.get_logger(__name__) class SuperGlueConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SuperGlueModel`]. It is used to instantiate a SuperGlue model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SuperGlue [magic-leap-community/superglue_indoor](https://huggingface.co/magic-leap-community/superglue_indoor) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: keypoint_detector_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SuperPointConfig`): The config object or dictionary of the keypoint detector. hidden_size (`int`, *optional*, defaults to 256): The dimension of the descriptors. keypoint_encoder_sizes (`List[int]`, *optional*, defaults to `[32, 64, 128, 256]`): The sizes of the keypoint encoder layers. gnn_layers_types (`List[str]`, *optional*, defaults to `['self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross']`): The types of the GNN layers. Must be either 'self' or 'cross'. num_attention_heads (`int`, *optional*, defaults to 4): The number of heads in the GNN layers. sinkhorn_iterations (`int`, *optional*, defaults to 100): The number of Sinkhorn iterations. matching_threshold (`float`, *optional*, defaults to 0.0): The matching threshold. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
    Examples:

    ```python
    >>> from transformers import SuperGlueConfig, SuperGlueModel

    >>> # Initializing a SuperGlue magic-leap-community/superglue_indoor style configuration
    >>> configuration = SuperGlueConfig()

    >>> # Initializing a model from the superglue_indoor style configuration
    >>> model = SuperGlueModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "superglue"

    def __init__(
        self,
        keypoint_detector_config: "SuperPointConfig" = None,
        hidden_size: int = 256,
        keypoint_encoder_sizes: List[int] = None,
        gnn_layers_types: List[str] = None,
        num_attention_heads: int = 4,
        sinkhorn_iterations: int = 100,
        matching_threshold: float = 0.0,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        self.gnn_layers_types = gnn_layers_types if gnn_layers_types is not None else ["self", "cross"] * 9
        # Check whether all gnn_layers_types are either 'self' or 'cross'
        if not all(layer_type in ["self", "cross"] for layer_type in self.gnn_layers_types):
            raise ValueError("All gnn_layers_types must be either 'self' or 'cross'")
        if hidden_size % num_attention_heads != 0:
            raise ValueError(
                f"hidden_size ({hidden_size}) must be divisible by num_attention_heads ({num_attention_heads})"
            )

        self.keypoint_encoder_sizes = (
            keypoint_encoder_sizes if keypoint_encoder_sizes is not None else [32, 64, 128, 256]
        )
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.sinkhorn_iterations = sinkhorn_iterations
        self.matching_threshold = matching_threshold

        if isinstance(keypoint_detector_config, dict):
            keypoint_detector_config["model_type"] = (
                keypoint_detector_config["model_type"] if "model_type" in keypoint_detector_config else "superpoint"
            )
            keypoint_detector_config = CONFIG_MAPPING[keypoint_detector_config["model_type"]](
                **keypoint_detector_config
            )
        if keypoint_detector_config is None:
            keypoint_detector_config = CONFIG_MAPPING["superpoint"]()

        self.keypoint_detector_config = keypoint_detector_config
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = 0
        self.is_decoder = False

        super().__init__(**kwargs)


__all__ = ["SuperGlueConfig"]
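# Illustrative construction sketches (the parameter values below are arbitrary examples, not recommended
# settings; `hidden_size` must stay divisible by `num_attention_heads`):
#
#     from transformers import SuperGlueConfig
#
#     # Default configuration: SuperPoint keypoint detector, 256-dim descriptors, 9 alternating
#     # self/cross GNN blocks.
#     config = SuperGlueConfig()
#
#     # The keypoint detector can also be given as a plain dict; "model_type" falls back to "superpoint".
#     config = SuperGlueConfig(
#         keypoint_detector_config={"model_type": "superpoint"},
#         hidden_size=128,
#         num_attention_heads=4,
#         gnn_layers_types=["self", "cross"] * 6,
#     )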
transformers/src/transformers/models/superglue/configuration_superglue.py/0
{ "file_path": "transformers/src/transformers/models/superglue/configuration_superglue.py", "repo_id": "transformers", "token_count": 2005 }
137
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Swin SimMIM checkpoints from the original repository. URL: https://github.com/microsoft/Swin-Transformer/blob/main/MODELHUB.md#simmim-pretrained-swin-v1-models""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def get_swin_config(model_name): config = SwinConfig(image_size=192) if "base" in model_name: window_size = 6 embed_dim = 128 depths = (2, 2, 18, 2) num_heads = (4, 8, 16, 32) elif "large" in model_name: window_size = 12 embed_dim = 192 depths = (2, 2, 18, 2) num_heads = (6, 12, 24, 48) else: raise ValueError("Model not supported, only supports base and large variants") config.window_size = window_size config.embed_dim = embed_dim config.depths = depths config.num_heads = num_heads return config def rename_key(name): if "encoder.mask_token" in name: name = name.replace("encoder.mask_token", "embeddings.mask_token") if "encoder.patch_embed.proj" in name: name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection") if "encoder.patch_embed.norm" in name: name = name.replace("encoder.patch_embed.norm", "embeddings.norm") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if name == "encoder.norm.weight": name = "layernorm.weight" if name == "encoder.norm.bias": name = "layernorm.bias" if "decoder" in name: pass else: name = "swin." 
+ name return name def convert_state_dict(orig_state_dict, model): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "attn_mask" in key: pass elif "qkv" in key: key_split = key.split(".") layer_num = int(key_split[2]) block_num = int(key_split[4]) dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = ( val[:dim, :] ) orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[ dim : dim * 2, : ] orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = ( val[-dim:, :] ) else: orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[ :dim ] orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[ dim : dim * 2 ] orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[ -dim: ] else: orig_state_dict[rename_key(key)] = val return orig_state_dict def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub): state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] config = get_swin_config(model_name) model = SwinForMaskedImageModeling(config) model.eval() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) url = "http://images.cocodataset.org/val2017/000000039769.jpg" image_processor = ViTImageProcessor(size={"height": 192, "width": 192}) image = Image.open(requests.get(url, stream=True).raw) inputs = image_processor(images=image, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs).logits print(outputs.keys()) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and image processor for {model_name} to hub") model.push_to_hub(f"microsoft/{model_name}") image_processor.push_to_hub(f"microsoft/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="swin-base-simmim-window6-192", type=str, choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"], help="Name of the Swin SimMIM model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth", type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/swin/convert_swin_simmim_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/swin/convert_swin_simmim_to_pytorch.py", "repo_id": "transformers", "token_count": 2898 }
138
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SwitchTransformersX checkpoints from the original repository to JAX/FLAX model.""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from t5x import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument MOE_LAYER_NAME_MAPPING = { "/attention/": "/0/SelfAttention/", "/self_attention/": "/0/SelfAttention/", "/encoder_decoder_attention/": "/1/EncDecAttention/", "value": "v", "query": "q", "key": "k", "out": "o", "pre_self_attention_layer_norm": "0/layer_norm", "pre_cross_attention_layer_norm": "1/layer_norm", "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong "token_embedder": "shared", "encoder_norm": "final_layer_norm", "decoder_norm": "final_layer_norm", "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight", "router/router_weights/w/": "router/classifier/", "roer/roer_weights/w/": "router/classifier/", "logits_dense": "lm_head", } def rename_keys(s_dict): # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model keys = list(s_dict.keys()) for key in keys: layer_to_block_of_layer = r".*/layers_(\d+)" new_key = key if re.match(layer_to_block_of_layer, key): new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key) layer_to_block_of_layer = r"(encoder|decoder)\/" if re.match(layer_to_block_of_layer, key): groups = re.match(layer_to_block_of_layer, new_key).groups() if groups[0] == "encoder": new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key) new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key) elif groups[0] == "decoder": new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key) new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: new_key = new_key.replace(old_key, temp_key) print(f"{key} -> {new_key}") s_dict[new_key] = s_dict.pop(key) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys()): if "expert" in key: num_experts = s_dict[key].shape[0] expert_weihts = s_dict[key] for idx in range(num_experts): s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weihts[idx] print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}") s_dict.pop(key) return s_dict GIN_TO_CONFIG_MAPPING = { "NUM_ENCODER_LAYERS": "num_layers", "NUM_DECODER_LAYERS": "num_decoder_layers", "NUM_HEADS": "num_heads", "HEAD_DIM": "d_kv", "EMBED_DIM": "d_model", "MLP_DIM": "d_ff", "NUM_SELECTED_EXPERTS": "num_selected_experts", "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers", "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers", "dense.MlpBlock.activations": "feed_forward_proj", } def convert_gin_to_config(gin_file, num_experts): # Convert a google style config to the hugging face fromat import regex as re with open(gin_file, "r") as f: raw_gin = f.read() regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin) args = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value) activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0] args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1]) args["num_experts"] = num_experts config = SwitchTransformersConfig(**args) return config def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8 ): # Initialise PyTorch model print(f"Loading flax weights from : {flax_checkpoint_path}") flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path) if gin_file is not None: config = convert_gin_to_config(gin_file, num_experts) else: config = SwitchTransformersConfig.from_pretrained(config_file) pt_model = SwitchTransformersForConditionalGeneration(config) flax_params = flax_params["target"] flax_params = flatten_dict(flax_params, sep="/") flax_params = rename_keys(flax_params) flax_params = unflatten_dict(flax_params, sep="/") # Load the flax params in the PT model load_flax_weights_in_pytorch_model(pt_model, flax_params) print(f"Save PyTorch model to {pytorch_dump_path}") pt_model.save_pretrained(pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the" " model architecture. If not provided, a `gin_file` has to be provided." ), ) parser.add_argument( "--gin_file", default=None, type=str, required=False, help="Path to the gin config file. If not provided, a `config_file` has to be passed ", ) parser.add_argument( "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model." ) parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts") args = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_t5x_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
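# Example invocation (illustrative; the paths are placeholders). Either --gin_file or --config_name must be
# provided so that a SwitchTransformersConfig can be built; a gin file is parsed with the `(.*) = ([0-9.]*)`
# pattern above, so it is expected to contain lines such as `NUM_ENCODER_LAYERS = 12`.
#
#     python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#         --switch_t5x_checkpoint_path /path/to/t5x_checkpoint_dir \
#         --gin_file /path/to/config.gin \
#         --num_experts 8 \
#         --pytorch_dump_folder_path ./switch-transformers-converted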
transformers/src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 3257 }
139
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Table Transformer checkpoints with native (Transformers) backbone. URL: https://github.com/microsoft/table-transformer """ import argparse from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, ResNetConfig, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def create_rename_keys(config): # here we list all keys to be renamed (original name on the left, our name on the right) rename_keys = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight")) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight")) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias")) rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean")) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var")) # stages for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.running_var", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv2.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.bias", 
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.running_var", ) ) # all ResNet stages except the first one have a downsample as first layer if stage_idx != 0 and layer_idx == 0: rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean", ) ) rename_keys.append( ( # "backbone.conv_encoder.model.encoder.stages.3.layers.0.shortcut.normalization.running_var" f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var", ) ) # fmt: on for i in range(config.encoder_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", 
f"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ] ) return rename_keys def rename_key(state_dict, old, new): val = state_dict.pop(old) state_dict[new] = val def read_in_q_k_v(state_dict, is_panoptic=False): prefix = "" if is_panoptic: prefix = "detr." 
# first: transformer encoder for i in range(6): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6): # read in weights + bias of input projection layer of self-attention in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention in_proj_weight_cross_attn = state_dict.pop( f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" ) in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias") # next, add query, keys and values (in that order) of cross-attention to the state dict state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :] state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256] state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :] state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512] state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :] state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:] def resize(image, checkpoint_url): width, height = image.size current_max_size = max(width, height) target_max_size = 800 if "detection" in checkpoint_url else 1000 scale = target_max_size / current_max_size resized_image = image.resize((int(round(scale * width)), int(round(scale * height)))) return resized_image def normalize(image): image = F.to_tensor(image) image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) return image @torch.no_grad() def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub): """ Copy/paste/tweak model's weights to our DETR structure. 
""" logger.info("Converting model...") # create HuggingFace model and load state dict backbone_config = ResNetConfig.from_pretrained( "microsoft/resnet-18", out_features=["stage1", "stage2", "stage3", "stage4"] ) config = TableTransformerConfig( backbone_config=backbone_config, use_timm_backbone=False, mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, ) # load original state dict state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") # rename keys for src, dest in create_rename_keys(config): rename_key(state_dict, src, dest) # query, key and value matrices need special treatment read_in_q_k_v(state_dict) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them prefix = "model." for key in state_dict.copy().keys(): if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"): val = state_dict.pop(key) state_dict[prefix + key] = val if "detection" in checkpoint_url: config.num_queries = 15 config.num_labels = 2 id2label = {0: "table", 1: "table rotated"} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} else: config.num_queries = 125 config.num_labels = 6 id2label = { 0: "table", 1: "table column", 2: "table row", 3: "table column header", 4: "table projected row header", 5: "table spanning cell", } config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} image_processor = DetrImageProcessor(format="coco_detection", size={"longest_edge": 800}) model = TableTransformerForObjectDetection(config) model.load_state_dict(state_dict) model.eval() # verify our conversion filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename) image = Image.open(file_path).convert("RGB") pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0) outputs = model(pixel_values) if "detection" in checkpoint_url: expected_shape = (1, 15, 3) expected_logits = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]]) else: expected_shape = (1, 125, 7) expected_logits = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]]) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4) assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: # Push model to HF hub logger.info("Pushing model to the hub...") model_name = ( "microsoft/table-transformer-detection" if "detection" in checkpoint_url else "microsoft/table-transformer-structure-recognition" ) model.push_to_hub(model_name, revision="no_timm") 
image_processor.push_to_hub(model_name, revision="no_timm") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
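Below is a minimal usage sketch (not part of the original conversion script) showing how a checkpoint saved by this script could be loaded back for table detection. The local folder name and the input image path are placeholders, and the 0.7 confidence threshold is an arbitrary choice.

```python
import torch
from PIL import Image

from transformers import DetrImageProcessor, TableTransformerForObjectDetection

# "converted-table-transformer" is a placeholder for the --pytorch_dump_folder_path used above.
image_processor = DetrImageProcessor.from_pretrained("converted-table-transformer")
model = TableTransformerForObjectDetection.from_pretrained("converted-table-transformer")
model.eval()

# "page.png" is a placeholder path to a document image containing a table.
image = Image.open("page.png").convert("RGB")
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Turn raw logits/boxes into labelled detections in absolute (xmin, ymin, xmax, ymax) coordinates.
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = image_processor.post_process_object_detection(outputs, threshold=0.7, target_sizes=target_sizes)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```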
transformers/src/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py/0
{ "file_path": "transformers/src/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py", "repo_id": "transformers", "token_count": 10230 }
140
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ConvNext + UperNet checkpoints from mmsegmentation.""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def get_upernet_config(model_name): auxiliary_in_channels = 384 if "tiny" in model_name: depths = [3, 3, 9, 3] hidden_sizes = [96, 192, 384, 768] if "small" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [96, 192, 384, 768] if "base" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [128, 256, 512, 1024] auxiliary_in_channels = 512 if "large" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [192, 384, 768, 1536] auxiliary_in_channels = 768 if "xlarge" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [256, 512, 1024, 2048] auxiliary_in_channels = 1024 # set label information num_labels = 150 repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} backbone_config = ConvNextConfig( depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"] ) config = UperNetConfig( backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, ) return config # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] # fmt: off # stem rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight")) rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias")) rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight")) rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias")) # stages for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter")) rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight")) rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias")) rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight")) rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias")) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight")) 
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias")) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight")) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias")) if i > 0: rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight")) rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias")) rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight")) rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias")) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight")) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias")) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): model_name_to_url = { "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth", "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth", "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth", "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth", "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth", } checkpoint_url = model_name_to_url[model_name] state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"] config = get_upernet_config(model_name) model = UperNetForSemanticSegmentation(config) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): val = state_dict.pop(key) if "bn" in key: key = key.replace("bn", "batch_norm") state_dict[key] = val # rename keys rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) model.load_state_dict(state_dict) # verify on image url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") processor = SegformerImageProcessor() pixel_values = processor(image, return_tensors="pt").pixel_values with torch.no_grad(): 
outputs = model(pixel_values) if model_name == "upernet-convnext-tiny": expected_slice = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": expected_slice = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": expected_slice = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": expected_slice = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": expected_slice = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print("Logits:", outputs.logits[0, 0, :3, :3]) assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub") model.push_to_hub(f"openmmlab/{model_name}") processor.push_to_hub(f"openmmlab/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-convnext-tiny", type=str, choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]], help="Name of the ConvNext UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
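As an illustrative follow-up (not part of the original script), a converted checkpoint saved to `--pytorch_dump_folder_path` could be reloaded and used for ADE20k semantic segmentation roughly as follows; the folder name is a placeholder and the test image is the same fixture used for verification above.

```python
import requests
import torch
from PIL import Image

from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation

# "converted-upernet-convnext-tiny" is a placeholder for the --pytorch_dump_folder_path used above.
processor = SegformerImageProcessor.from_pretrained("converted-upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("converted-upernet-convnext-tiny")
model.eval()

url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Upsample the logits to the input resolution and take the per-pixel argmax over the 150 ADE20k classes.
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(segmentation.shape, segmentation.unique())
```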
transformers/src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py", "repo_id": "transformers", "token_count": 4521 }
141
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ViLT model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class ViltConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ViltModel`]. It is used to instantiate a ViLT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ViLT [dandelin/vilt-b32-mlm](https://huggingface.co/dandelin/vilt-b32-mlm) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the text part of the model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`ViltModel`]. type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`ViltModel`]. This is used when encoding text. modality_type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the modalities passed when calling [`ViltModel`]. This is used after concatenating the embeddings of the text and image modalities. max_position_embeddings (`int`, *optional*, defaults to 40): The maximum sequence length that this model might ever be used with. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 384): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. max_image_length (`int`, *optional*, defaults to -1): The maximum number of patches to take as input for the Transformer encoder. If set to a positive integer, the encoder will sample `max_image_length` patches at maximum. If set to -1, will not be taken into account. num_images (`int`, *optional*, defaults to -1): The number of images to use for natural language visual reasoning. If set to a positive integer, will be used by [`ViltForImagesAndTextClassification`] for defining the classifier head. Example: ```python >>> from transformers import ViltModel, ViltConfig >>> # Initializing a ViLT dandelin/vilt-b32-mlm style configuration >>> configuration = ViltConfig() >>> # Initializing a model from the dandelin/vilt-b32-mlm style configuration >>> model = ViltModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vilt" def __init__( self, vocab_size=30522, type_vocab_size=2, modality_type_vocab_size=2, max_position_embeddings=40, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=32, num_channels=3, qkv_bias=True, max_image_length=-1, tie_word_embeddings=False, num_images=-1, **kwargs, ): super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) self.vocab_size = vocab_size self.type_vocab_size = type_vocab_size self.modality_type_vocab_size = modality_type_vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.max_image_length = max_image_length self.num_images = num_images __all__ = ["ViltConfig"]
transformers/src/transformers/models/vilt/configuration_vilt.py/0
{ "file_path": "transformers/src/transformers/models/vilt/configuration_vilt.py", "repo_id": "transformers", "token_count": 2579 }
142
# coding=utf-8 # Copyright The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """VisionTextDualEncoder model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig from ..chinese_clip.configuration_chinese_clip import ChineseCLIPVisionConfig from ..clip.configuration_clip import CLIPVisionConfig from ..siglip.configuration_siglip import SiglipVisionConfig logger = logging.get_logger(__name__) VISION_MODEL_CONFIGS = { "clip_vision_model": CLIPVisionConfig, "chinese_clip_vision_model": ChineseCLIPVisionConfig, "siglip_vision_model": SiglipVisionConfig, } class VisionTextDualEncoderConfig(PretrainedConfig): r""" [`VisionTextDualEncoderConfig`] is the configuration class to store the configuration of a [`VisionTextDualEncoderModel`]. It is used to instantiate [`VisionTextDualEncoderModel`] model according to the specified arguments, defining the text model and vision model configs. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation. kwargs (*optional*): Dictionary of keyword arguments. 
Examples: ```python >>> from transformers import ViTConfig, BertConfig, VisionTextDualEncoderConfig, VisionTextDualEncoderModel >>> # Initializing a BERT and ViT configuration >>> config_vision = ViTConfig() >>> config_text = BertConfig() >>> config = VisionTextDualEncoderConfig.from_vision_text_configs(config_vision, config_text, projection_dim=512) >>> # Initializing a BERT and ViT model (with random weights) >>> model = VisionTextDualEncoderModel(config=config) >>> # Accessing the model configuration >>> config_vision = model.config.vision_config >>> config_text = model.config.text_config >>> # Saving the model, including its configuration >>> model.save_pretrained("vit-bert") >>> # loading model and config from pretrained folder >>> vision_text_config = VisionTextDualEncoderConfig.from_pretrained("vit-bert") >>> model = VisionTextDualEncoderModel.from_pretrained("vit-bert", config=vision_text_config) ```""" model_type = "vision-text-dual-encoder" sub_configs = {"vision_config": AutoConfig, "text_config": AutoConfig} is_composition = True def __init__(self, projection_dim=512, logit_scale_init_value=2.6592, **kwargs): super().__init__(**kwargs) if "vision_config" not in kwargs: raise ValueError("`vision_config` can not be `None`.") if "text_config" not in kwargs: raise ValueError("`text_config` can not be `None`.") vision_config = kwargs.pop("vision_config") text_config = kwargs.pop("text_config") vision_model_type = vision_config.pop("model_type") text_model_type = text_config.pop("model_type") vision_config_class = VISION_MODEL_CONFIGS.get(vision_model_type) if vision_config_class is not None: self.vision_config = vision_config_class(**vision_config) else: self.vision_config = AutoConfig.for_model(vision_model_type, **vision_config) if hasattr(self.vision_config, "vision_config"): self.vision_config = self.vision_config.vision_config self.text_config = AutoConfig.for_model(text_model_type, **text_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value @classmethod def from_vision_text_configs(cls, vision_config: PretrainedConfig, text_config: PretrainedConfig, **kwargs): r""" Instantiate a [`VisionTextDualEncoderConfig`] (or a derived class) from text model configuration and vision model configuration. Returns: [`VisionTextDualEncoderConfig`]: An instance of a configuration object """ return cls(vision_config=vision_config.to_dict(), text_config=text_config.to_dict(), **kwargs) __all__ = ["VisionTextDualEncoderConfig"]
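For context, a configuration like the one above is usually created implicitly when pairing two pretrained backbones. The sketch below shows one common way to assemble and query such a dual encoder; the checkpoint names are public ViT/BERT checkpoints chosen purely for illustration, the image path is a placeholder, and the similarity scores are not meaningful until the projection layers have been fine-tuned.

```python
import torch
from PIL import Image

from transformers import (
    BertTokenizer,
    VisionTextDualEncoderModel,
    VisionTextDualEncoderProcessor,
    ViTImageProcessor,
)

# Assemble a dual encoder from two independently pretrained backbones
# (public checkpoint names, used purely for illustration).
model = VisionTextDualEncoderModel.from_vision_text_pretrained(
    "google/vit-base-patch16-224", "google-bert/bert-base-uncased"
)
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
processor = VisionTextDualEncoderProcessor(image_processor, tokenizer)

image = Image.open("cats.jpg")  # placeholder image path
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
)

with torch.no_grad():
    outputs = model(**inputs)

# The projection layers are newly initialized, so these scores are only meaningful after fine-tuning.
print(outputs.logits_per_image.softmax(dim=1))
```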
transformers/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py/0
{ "file_path": "transformers/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py", "repo_id": "transformers", "token_count": 1698 }
143
# coding=utf-8 # Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPooling, FlaxSequenceClassifierOutput from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward from .configuration_vit import ViTConfig VIT_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) This model is also a [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`ViTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ VIT_INPUTS_DOCSTRING = r""" Args: pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class FlaxViTPatchEmbeddings(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): image_size = self.config.image_size patch_size = self.config.patch_size num_patches = (image_size // patch_size) * (image_size // patch_size) self.num_patches = num_patches self.num_channels = self.config.num_channels self.projection = nn.Conv( self.config.hidden_size, kernel_size=(patch_size, patch_size), strides=(patch_size, patch_size), padding="VALID", dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), ) def __call__(self, pixel_values): num_channels = pixel_values.shape[-1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embeddings = self.projection(pixel_values) batch_size, _, _, channels = embeddings.shape return jnp.reshape(embeddings, (batch_size, -1, channels)) class FlaxViTEmbeddings(nn.Module): """Construct the CLS token, position and patch embeddings.""" config: ViTConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.cls_token = self.param( "cls_token", jax.nn.initializers.variance_scaling(self.config.initializer_range**2, "fan_in", "truncated_normal"), (1, 1, self.config.hidden_size), ) self.patch_embeddings = FlaxViTPatchEmbeddings(self.config, dtype=self.dtype) num_patches = self.patch_embeddings.num_patches self.position_embeddings = self.param( "position_embeddings", jax.nn.initializers.variance_scaling(self.config.initializer_range**2, "fan_in", "truncated_normal"), (1, num_patches + 1, self.config.hidden_size), ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, pixel_values, deterministic=True): batch_size = pixel_values.shape[0] embeddings = self.patch_embeddings(pixel_values) cls_tokens = jnp.broadcast_to(self.cls_token, (batch_size, 1, self.config.hidden_size)) embeddings = jnp.concatenate((cls_tokens, embeddings), axis=1) embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings, deterministic=deterministic) return embeddings class FlaxViTSelfAttention(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError( "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads`:" " {self.config.num_attention_heads}" ) self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, mode="fan_in", distribution="truncated_normal" ), use_bias=self.config.qkv_bias, ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, mode="fan_in", distribution="truncated_normal" ), use_bias=self.config.qkv_bias, ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, mode="fan_in", 
distribution="truncated_normal" ), use_bias=self.config.qkv_bias, ) def __call__(self, hidden_states, deterministic: bool = True, output_attentions: bool = False): head_dim = self.config.hidden_size // self.config.num_attention_heads query_states = self.query(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) ) value_states = self.value(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) ) key_states = self.key(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) ) dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxViTSelfOutput(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxViTAttention(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxViTSelfAttention(self.config, dtype=self.dtype) self.output = FlaxViTSelfOutput(self.config, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True, output_attentions: bool = False): attn_outputs = self.attention(hidden_states, deterministic=deterministic, output_attentions=output_attentions) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs class FlaxViTIntermediate(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), dtype=self.dtype, ) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxViTOutput(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, attention_output, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = 
hidden_states + attention_output return hidden_states class FlaxViTLayer(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = FlaxViTAttention(self.config, dtype=self.dtype) self.intermediate = FlaxViTIntermediate(self.config, dtype=self.dtype) self.output = FlaxViTOutput(self.config, dtype=self.dtype) self.layernorm_before = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.layernorm_after = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, deterministic: bool = True, output_attentions: bool = False): attention_outputs = self.attention( self.layernorm_before(hidden_states), # in ViT, layernorm is applied before self-attention deterministic=deterministic, output_attentions=output_attentions, ) attention_output = attention_outputs[0] # first residual connection attention_output = attention_output + hidden_states # in ViT, layernorm is also applied after self-attention layer_output = self.layernorm_after(attention_output) hidden_states = self.intermediate(layer_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) return outputs class FlaxViTLayerCollection(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxViTLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer(hidden_states, deterministic=deterministic, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states,) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class FlaxViTEncoder(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layer = FlaxViTLayerCollection(self.config, dtype=self.dtype) def __call__( self, hidden_states, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return self.layer( hidden_states, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class FlaxViTPooler(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), dtype=self.dtype, ) def __call__(self, hidden_states): cls_hidden_state = hidden_states[:, 0] cls_hidden_state = self.dense(cls_hidden_state) return nn.tanh(cls_hidden_state) class FlaxViTPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for 
downloading and loading pretrained models. """ config_class = ViTConfig base_model_prefix = "vit" main_input_name = "pixel_values" module_class: nn.Module = None def __init__( self, config: ViTConfig, input_shape=None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) if input_shape is None: input_shape = (1, config.image_size, config.image_size, config.num_channels) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors pixel_values = jnp.zeros(input_shape, dtype=self.dtype) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, pixel_values, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, pixel_values, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": params or self.params}, jnp.array(pixel_values, dtype=jnp.float32), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs, ) class FlaxViTModule(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation add_pooling_layer: bool = True def setup(self): self.embeddings = FlaxViTEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxViTEncoder(self.config, dtype=self.dtype) self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.pooler = FlaxViTPooler(self.config, dtype=self.dtype) if self.add_pooling_layer else None def __call__( self, pixel_values, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): hidden_states = self.embeddings(pixel_values, deterministic=deterministic) outputs = self.encoder( hidden_states, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.layernorm(hidden_states) pooled = self.pooler(hidden_states) if self.add_pooling_layer else None if not return_dict: # if pooled is None, don't return it if pooled is None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPooling( last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, 
attentions=outputs.attentions, ) @add_start_docstrings( "The bare ViT Model transformer outputting raw hidden-states without any specific head on top.", VIT_START_DOCSTRING, ) class FlaxViTModel(FlaxViTPreTrainedModel): module_class = FlaxViTModule FLAX_VISION_MODEL_DOCSTRING = """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, FlaxViTModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") >>> model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224-in21k") >>> inputs = image_processor(images=image, return_tensors="np") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ overwrite_call_docstring(FlaxViTModel, FLAX_VISION_MODEL_DOCSTRING) append_replace_return_docstrings(FlaxViTModel, output_type=FlaxBaseModelOutputWithPooling, config_class=ViTConfig) class FlaxViTForImageClassificationModule(nn.Module): config: ViTConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.vit = FlaxViTModule(config=self.config, dtype=self.dtype, add_pooling_layer=False) self.classifier = nn.Dense( self.config.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), ) def __call__( self, pixel_values=None, deterministic: bool = True, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.vit( pixel_values, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.classifier(hidden_states[:, 0, :]) if not return_dict: output = (logits,) + outputs[2:] return output return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. 
""", VIT_START_DOCSTRING, ) class FlaxViTForImageClassification(FlaxViTPreTrainedModel): module_class = FlaxViTForImageClassificationModule FLAX_VISION_CLASSIF_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoImageProcessor, FlaxViTForImageClassification >>> from PIL import Image >>> import jax >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") >>> model = FlaxViTForImageClassification.from_pretrained("google/vit-base-patch16-224") >>> inputs = image_processor(images=image, return_tensors="np") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_class_idx = jax.numpy.argmax(logits, axis=-1) >>> print("Predicted class:", model.config.id2label[predicted_class_idx.item()]) ``` """ overwrite_call_docstring(FlaxViTForImageClassification, FLAX_VISION_CLASSIF_DOCSTRING) append_replace_return_docstrings( FlaxViTForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=ViTConfig ) __all__ = ["FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel"]
transformers/src/transformers/models/vit/modeling_flax_vit.py/0
{ "file_path": "transformers/src/transformers/models/vit/modeling_flax_vit.py", "repo_id": "transformers", "token_count": 10880 }
144
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """VitMatte model configuration""" import copy from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto.configuration_auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class VitMatteConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of [`VitMatteForImageMatting`]. It is used to instantiate a ViTMatte model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ViTMatte [hustvl/vitmatte-small-composition-1k](https://huggingface.co/hustvl/vitmatte-small-composition-1k) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `VitDetConfig()`): The configuration of the backbone model. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, defaults to `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. hidden_size (`int`, *optional*, defaults to 384): The number of input channels of the decoder. batch_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the batch norm layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. convstream_hidden_sizes (`List[int]`, *optional*, defaults to `[48, 96, 192]`): The output channels of the ConvStream module. fusion_hidden_sizes (`List[int]`, *optional*, defaults to `[256, 128, 64, 32]`): The output channels of the Fusion blocks. 
Example: ```python >>> from transformers import VitMatteConfig, VitMatteForImageMatting >>> # Initializing a ViTMatte hustvl/vitmatte-small-composition-1k style configuration >>> configuration = VitMatteConfig() >>> # Initializing a model (with random weights) from the hustvl/vitmatte-small-composition-1k style configuration >>> model = VitMatteForImageMatting(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vitmatte" def __init__( self, backbone_config: PretrainedConfig = None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, hidden_size: int = 384, batch_norm_eps: float = 1e-5, initializer_range: float = 0.02, convstream_hidden_sizes: List[int] = [48, 96, 192], fusion_hidden_sizes: List[int] = [256, 128, 64, 32], **kwargs, ): super().__init__(**kwargs) if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `VitDet` backbone.") backbone_config = CONFIG_MAPPING["vitdet"](out_features=["stage4"]) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.batch_norm_eps = batch_norm_eps self.hidden_size = hidden_size self.initializer_range = initializer_range self.convstream_hidden_sizes = convstream_hidden_sizes self.fusion_hidden_sizes = fusion_hidden_sizes def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns: `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, """ output = copy.deepcopy(self.__dict__) output["backbone_config"] = self.backbone_config.to_dict() output["model_type"] = self.__class__.model_type return output __all__ = ["VitMatteConfig"]
transformers/src/transformers/models/vitmatte/configuration_vitmatte.py/0
{ "file_path": "transformers/src/transformers/models/vitmatte/configuration_vitmatte.py", "repo_id": "transformers", "token_count": 2292 }
145
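A hedged illustration of the nested `backbone_config` handling and the `to_dict` override in the `configuration_vitmatte.py` entry above (the VitDet sizes below are arbitrary assumptions):

```python
from transformers import VitDetConfig, VitMatteConfig

# assumed, smaller-than-default backbone sizes purely for illustration
backbone_config = VitDetConfig(hidden_size=192, num_attention_heads=6, out_features=["stage4"])
config = VitMatteConfig(backbone_config=backbone_config, hidden_size=384)

# to_dict() serialises the nested backbone config into a plain dict (including its model_type),
# so it can be round-tripped back through VitMatteConfig.__init__
config_dict = config.to_dict()
assert isinstance(config_dict["backbone_config"], dict)

restored = VitMatteConfig(backbone_config=config_dict["backbone_config"])
print(type(restored.backbone_config).__name__)  # VitDetConfig
```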
# coding=utf-8 # Copyright 2023 The Kakao Enterprise Authors, the MMS-TTS Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for VITS.""" import json import os import re from typing import Any, Dict, List, Optional, Tuple, Union from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_phonemizer_available, is_uroman_available, logging if is_phonemizer_available(): import phonemizer if is_uroman_available(): import uroman as ur logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"} def has_non_roman_characters(input_string): # Find any character outside the ASCII range non_roman_pattern = re.compile(r"[^\x00-\x7F]") # Search the input string for non-Roman characters match = non_roman_pattern.search(input_string) has_non_roman = match is not None return has_non_roman class VitsTokenizer(PreTrainedTokenizer): """ Construct a VITS tokenizer. Also supports MMS-TTS. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. language (`str`, *optional*): Language identifier. add_blank (`bool`, *optional*, defaults to `True`): Whether to insert token id 0 in between the other tokens. normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the input text by removing all casing and punctuation. phonemize (`bool`, *optional*, defaults to `True`): Whether to convert the input text into phonemes. is_uroman (`bool`, *optional*, defaults to `False`): Whether the `uroman` Romanizer needs to be applied to the input text prior to tokenizing. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, pad_token="<pad>", unk_token="<unk>", language=None, add_blank=True, normalize=True, phonemize=True, is_uroman=False, **kwargs, ) -> None: with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.language = language self.add_blank = add_blank self.normalize = normalize self.phonemize = phonemize self.is_uroman = is_uroman super().__init__( pad_token=pad_token, unk_token=unk_token, language=language, add_blank=add_blank, normalize=normalize, phonemize=phonemize, is_uroman=is_uroman, **kwargs, ) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def normalize_text(self, input_string): """Lowercase the input string, respecting any special token ids that may be part or entirely upper-cased.""" all_vocabulary = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys()) filtered_text = "" i = 0 while i < len(input_string): found_match = False for word in all_vocabulary: if input_string[i : i + len(word)] == word: filtered_text += word i += len(word) found_match = True break if not found_match: filtered_text += input_string[i].lower() i += 1 return filtered_text def _preprocess_char(self, text): """Special treatment of characters in certain languages""" if self.language == "ron": text = text.replace("ț", "ţ") return text def prepare_for_tokenization( self, text: str, is_split_into_words: bool = False, normalize: Optional[bool] = None, **kwargs ) -> Tuple[str, Dict[str, Any]]: """ Performs any necessary transformations before tokenization. This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the `kwargs` at the end of the encoding process to be sure all the arguments have been used. Args: text (`str`): The text to prepare. is_split_into_words (`bool`, *optional*, defaults to `False`): Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. normalize (`bool`, *optional*, defaults to `None`): Whether or not to apply punctuation and casing normalization to the text inputs. Typically, VITS is trained on lower-cased and un-punctuated text. Hence, normalization is used to ensure that the input text consists only of lower-case characters. kwargs (`Dict[str, Any]`, *optional*): Keyword arguments to use for the tokenization. Returns: `Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs. """ normalize = normalize if normalize is not None else self.normalize if normalize: # normalise for casing text = self.normalize_text(text) filtered_text = self._preprocess_char(text) if has_non_roman_characters(filtered_text) and self.is_uroman: if not is_uroman_available(): logger.warning( "Text to the tokenizer contains non-Roman characters. 
To apply the `uroman` pre-processing " "step automatically, ensure the `uroman` Romanizer is installed with: `pip install uroman` " "Note `uroman` requires python version >= 3.10" "Otherwise, apply the Romanizer manually as per the instructions: https://github.com/isi-nlp/uroman" ) else: uroman = ur.Uroman() filtered_text = uroman.romanize_string(filtered_text) if self.phonemize: if not is_phonemizer_available(): raise ImportError("Please install the `phonemizer` Python package to use this tokenizer.") filtered_text = phonemizer.phonemize( filtered_text, language="en-us", backend="espeak", strip=True, preserve_punctuation=True, with_stress=True, ) filtered_text = re.sub(r"\s+", " ", filtered_text) elif normalize: # strip any chars outside of the vocab (punctuation) filtered_text = "".join(list(filter(lambda char: char in self.encoder, filtered_text))).strip() return filtered_text, kwargs def _tokenize(self, text: str) -> List[str]: """Tokenize a string by inserting the `<pad>` token at the boundary between adjacent characters.""" tokens = list(text) if self.add_blank: interspersed = [self._convert_id_to_token(0)] * (len(tokens) * 2 + 1) interspersed[1::2] = tokens tokens = interspersed return tokens def convert_tokens_to_string(self, tokens: List[str]) -> str: if self.add_blank and len(tokens) > 1: tokens = tokens[1::2] return "".join(tokens) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Union[Tuple[str], None]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,) __all__ = ["VitsTokenizer"]
transformers/src/transformers/models/vits/tokenization_vits.py/0
{ "file_path": "transformers/src/transformers/models/vits/tokenization_vits.py", "repo_id": "transformers", "token_count": 3979 }
146
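A short, hedged sketch of the `add_blank` interspersing performed by `_tokenize` in the `tokenization_vits.py` entry above; the MMS-TTS English checkpoint is an assumed example (character-based, so no phonemizer is required):

```python
from transformers import VitsTokenizer

tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng")

ids = tokenizer("Hello world")["input_ids"]

# with add_blank=True the blank token (id 0) sits at every even position,
# so the encoded length is 2 * num_kept_characters + 1
print(len(ids), ids[:8])
print(all(token_id == 0 for token_id in ids[::2]))
```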
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def get_xclip_config(model_name, num_frames): text_config = XCLIPTextConfig() # derive patch size from model name start_idx = model_name.find("patch") patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2]) vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames) if "large" in model_name: text_config.hidden_size = 768 text_config.intermediate_size = 3072 text_config.num_attention_heads = 12 vision_config.hidden_size = 1024 vision_config.intermediate_size = 4096 vision_config.num_attention_heads = 16 vision_config.num_hidden_layers = 24 vision_config.mit_hidden_size = 768 vision_config.mit_intermediate_size = 3072 if model_name == "xclip-large-patch14-16-frames": vision_config.image_size = 336 config = XCLIPConfig.from_text_vision_configs(text_config, vision_config) if "large" in model_name: config.projection_dim = 768 return config def rename_key(name): # text encoder if name == "token_embedding.weight": name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight") if name == "positional_embedding": name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight") if "ln_1" in name: name = name.replace("ln_1", "layer_norm1") if "ln_2" in name: name = name.replace("ln_2", "layer_norm2") if "c_fc" in name: name = name.replace("c_fc", "fc1") if "c_proj" in name: name = name.replace("c_proj", "fc2") if name.startswith("transformer.resblocks"): name = name.replace("transformer.resblocks", "text_model.encoder.layers") if "attn.out_proj" in name and "message" not in name: name = name.replace("attn.out_proj", "self_attn.out_proj") if "ln_final" in name: name = name.replace("ln_final", "text_model.final_layer_norm") # visual encoder if name == "visual.class_embedding": name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding") if name == "visual.positional_embedding": name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight") if name.startswith("visual.transformer.resblocks"): name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers") if "visual.conv1" in name: name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding") if "visual.ln_pre" in name: name = name.replace("visual.ln_pre", "vision_model.pre_layernorm") if "visual.ln_post" in name: name = name.replace("visual.ln_post", "vision_model.post_layernorm") if "visual.proj" in name: name = name.replace("visual.proj", "visual_projection.weight") if "text_projection" in name: name = name.replace("text_projection", "text_projection.weight") # 
things on top if "prompts_visual_proj" in name: name = name.replace("prompts_visual_proj", "prompts_visual_projection") if "prompts_visual_ln" in name: name = name.replace("prompts_visual_ln", "prompts_visual_layernorm") # mit if name == "mit.positional_embedding": name = name.replace("positional", "position") if name.startswith("mit.resblocks"): name = name.replace("mit.resblocks", "mit.encoder.layers") # prompts generator if name.startswith("prompts_generator.norm"): name = name.replace("prompts_generator.norm", "prompts_generator.layernorm") return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "attn.in_proj" in key: key_split = key.split(".") if key.startswith("visual"): layer_num = key_split[3] dim = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[ :dim, : ] orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[ dim : dim * 2, : ] orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[ -dim:, : ] else: orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[ :dim ] orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[ dim : dim * 2 ] orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[ -dim: ] else: if "weight" in key: orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[ :dim, : ] orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[ dim : dim * 2, : ] orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[ -dim:, : ] else: orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim] orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[ dim : dim * 2 ] orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:] elif key.startswith("mit"): layer_num = key_split[2] dim = config.vision_config.mit_hidden_size if "weight" in key: orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :] orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :] orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :] else: orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim] orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2] orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:] else: layer_num = key_split[2] dim = config.text_config.hidden_size if "weight" in key: orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[ dim : dim * 2, : ] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :] else: orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[ dim : dim * 2 ] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:] else: new_key_name = rename_key(key) if 
new_key_name in ["visual_projection.weight", "text_projection.weight"]: val = val.T orig_state_dict[new_key_name] = val return orig_state_dict def prepare_video(num_frames): if num_frames == 8: filename = "eating_spaghetti_8_frames.npy" elif num_frames == 16: filename = "eating_spaghetti.npy" elif num_frames == 32: filename = "eating_spaghetti_32_frames.npy" file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset", ) video = np.load(file) return list(video) def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): model_to_url = { # fully supervised kinetics-400 checkpoints "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth", "xclip-base-patch32-16-frames": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth" ), "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth", "xclip-base-patch16-16-frames": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth" ), "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb", "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f", # fully supervised kinetics-600 checkpoints "xclip-base-patch16-kinetics-600": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth" ), "xclip-base-patch16-kinetics-600-16-frames": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth" ), "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be", # few shot "xclip-base-patch16-hmdb-2-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth" ), "xclip-base-patch16-hmdb-4-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth" ), "xclip-base-patch16-hmdb-8-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth" ), "xclip-base-patch16-hmdb-16-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth" ), "xclip-base-patch16-ucf-2-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth" ), "xclip-base-patch16-ucf-4-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth" ), "xclip-base-patch16-ucf-8-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth" ), "xclip-base-patch16-ucf-16-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth" ), # zero shot "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth", } checkpoint_url = model_to_url[model_name] num_frames = 8 if "16-frames" in model_name: num_frames = 16 elif "shot" in model_name: num_frames = 32 config = get_xclip_config(model_name, num_frames) model = XCLIPModel(config) model.eval() if "drive" in checkpoint_url: output = "pytorch_model.bin" gdown.cached_download(checkpoint_url, output, quiet=False) state_dict = torch.load(output, map_location="cpu")["model"] else: state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"] 
state_dict = convert_state_dict(state_dict, config) model = XCLIPModel(config) missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() size = 336 if model_name == "xclip-large-patch14-16-frames" else 224 image_processor = VideoMAEImageProcessor(size=size) slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32") processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer) video = prepare_video(num_frames) inputs = processor( text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True ) print("Shape of pixel values:", inputs.pixel_values.shape) with torch.no_grad(): outputs = model(**inputs) # Verify outputs logits_per_video = outputs.logits_per_video probs = logits_per_video.softmax(dim=1) print("Probs:", probs) # kinetics-400 if model_name == "xclip-base-patch32": expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]]) elif model_name == "xclip-base-patch32-16-frames": expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]]) elif model_name == "xclip-base-patch16": expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]]) elif model_name == "xclip-base-patch16-16-frames": expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]]) elif model_name == "xclip-large-patch14": expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]]) elif model_name == "xclip-large-patch14-16-frames": expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]]) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]]) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]]) elif model_name == "xclip-large-patch14-kinetics-600": expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]]) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]]) elif model_name == "xclip-base-patch16-hmdb-4-shot": expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]]) elif model_name == "xclip-base-patch16-hmdb-8-shot": expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]]) elif model_name == "xclip-base-patch16-hmdb-16-shot": expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]]) elif model_name == "xclip-base-patch16-ucf-2-shot": expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]]) elif model_name == "xclip-base-patch16-ucf-4-shot": expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]]) elif model_name == "xclip-base-patch16-ucf-8-shot": expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]]) elif model_name == "xclip-base-patch16-ucf-16-shot": expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]]) # zero shot elif model_name == "xclip-base-patch16-zero-shot": expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]]) else: raise ValueError(f"Model name {model_name} not supported") assert torch.allclose(probs, expected_probs, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print("Pushing model, processor and slow 
tokenizer files to the hub...") model.push_to_hub(model_name, organization="nielsr") processor.push_to_hub(model_name, organization="nielsr") slow_tokenizer.push_to_hub(model_name, organization="nielsr") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py", "repo_id": "transformers", "token_count": 8829 }
147
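The core of `convert_state_dict` in the X-CLIP conversion script above is slicing each fused `attn.in_proj` tensor into separate q/k/v projections; a self-contained sketch of that slicing with toy shapes (not real checkpoint tensors):

```python
import torch

dim = 8  # assumed toy hidden size
fused_weight = torch.randn(3 * dim, dim)  # stands in for an attn.in_proj_weight tensor
fused_bias = torch.randn(3 * dim)  # stands in for an attn.in_proj_bias tensor

q_w, k_w, v_w = fused_weight[:dim, :], fused_weight[dim : dim * 2, :], fused_weight[-dim:, :]
q_b, k_b, v_b = fused_bias[:dim], fused_bias[dim : dim * 2], fused_bias[-dim:]

# the three slices partition the fused tensor exactly
assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused_weight)
```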
# coding=utf-8 # Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for XLM.""" import json import os import re import sys import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def lowercase_and_remove_accent(text): """ Lowercase and strips accents from a piece of text based on https://github.com/facebookresearch/XLM/blob/master/tools/lowercase_and_remove_accent.py """ text = " ".join(text) text = text.lower() text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output).lower().split(" ") def replace_unicode_punct(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl """ text = text.replace(",", ",") text = re.sub(r"。\s*", ". ", text) text = text.replace("、", ",") text = text.replace("”", '"') text = text.replace("“", '"') text = text.replace("∶", ":") text = text.replace(":", ":") text = text.replace("?", "?") text = text.replace("《", '"') text = text.replace("》", '"') text = text.replace(")", ")") text = text.replace("!", "!") text = text.replace("(", "(") text = text.replace(";", ";") text = text.replace("1", "1") text = text.replace("」", '"') text = text.replace("「", '"') text = text.replace("0", "0") text = text.replace("3", "3") text = text.replace("2", "2") text = text.replace("5", "5") text = text.replace("6", "6") text = text.replace("9", "9") text = text.replace("7", "7") text = text.replace("8", "8") text = text.replace("4", "4") text = re.sub(r".\s*", ". 
", text) text = text.replace("~", "~") text = text.replace("’", "'") text = text.replace("…", "...") text = text.replace("━", "-") text = text.replace("〈", "<") text = text.replace("〉", ">") text = text.replace("【", "[") text = text.replace("】", "]") text = text.replace("%", "%") return text def remove_non_printing_char(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl """ output = [] for char in text: cat = unicodedata.category(char) if cat.startswith("C"): continue output.append(char) return "".join(output) def romanian_preprocessing(text): """Sennrich's WMT16 scripts for Romanian preprocessing, used by model `FacebookAI/xlm-mlm-enro-1024`""" # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/normalise-romanian.py text = text.replace("\u015e", "\u0218").replace("\u015f", "\u0219") text = text.replace("\u0162", "\u021a").replace("\u0163", "\u021b") # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/remove-diacritics.py text = text.replace("\u0218", "S").replace("\u0219", "s") # s-comma text = text.replace("\u021a", "T").replace("\u021b", "t") # t-comma text = text.replace("\u0102", "A").replace("\u0103", "a") text = text.replace("\u00c2", "A").replace("\u00e2", "a") text = text.replace("\u00ce", "I").replace("\u00ee", "i") return text class XLMTokenizer(PreTrainedTokenizer): """ Construct an XLM tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following: - Moses preprocessing and tokenization for most supported languages. - Language specific tokenization for Chinese (Jieba), Japanese (KyTea) and Thai (PyThaiNLP). - Optionally lowercases and normalizes all inputs text. - The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like "__classify__") to a vocabulary. - The `lang2id` attribute maps the languages supported by the model with their IDs if provided (automatically set for pretrained vocabularies). - The `id2lang` attributes does reverse mapping if provided (automatically set for pretrained vocabularies). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Vocabulary file. merges_file (`str`): Merges file. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"</s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). 
It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"<special1>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. additional_special_tokens (`List[str]`, *optional*, defaults to `['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']`): List of additional special tokens. lang2id (`Dict[str, int]`, *optional*): Dictionary mapping languages string identifiers to their IDs. id2lang (`Dict[int, str]`, *optional*): Dictionary mapping language IDs to their string identifiers. do_lowercase_and_remove_accent (`bool`, *optional*, defaults to `True`): Whether to lowercase and remove accents when tokenizing. """ vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, merges_file, unk_token="<unk>", bos_token="<s>", sep_token="</s>", pad_token="<pad>", cls_token="</s>", mask_token="<special1>", additional_special_tokens=[ "<special0>", "<special1>", "<special2>", "<special3>", "<special4>", "<special5>", "<special6>", "<special7>", "<special8>", "<special9>", ], lang2id=None, id2lang=None, do_lowercase_and_remove_accent=True, **kwargs, ): try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use XLMTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.sm = sacremoses # cache of sm.MosesPunctNormalizer instance self.cache_moses_punct_normalizer = {} # cache of sm.MosesTokenizer instance self.cache_moses_tokenizer = {} self.lang_with_custom_tokenizer = {"zh", "th", "ja"} # True for current supported model (v1.2.0), False for XLM-17 & 100 self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent self.lang2id = lang2id self.id2lang = id2lang if lang2id is not None and id2lang is not None: assert len(lang2id) == len(id2lang) self.ja_word_tokenizer = None self.zh_word_tokenizer = None with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__( unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, lang2id=lang2id, id2lang=id2lang, do_lowercase_and_remove_accent=do_lowercase_and_remove_accent, **kwargs, ) @property def do_lower_case(self): return self.do_lowercase_and_remove_accent def moses_punct_norm(self, text, lang): if lang not in self.cache_moses_punct_normalizer: punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang) self.cache_moses_punct_normalizer[lang] = punct_normalizer else: punct_normalizer = self.cache_moses_punct_normalizer[lang] return punct_normalizer.normalize(text) def moses_tokenize(self, text, lang): if lang not in self.cache_moses_tokenizer: moses_tokenizer = self.sm.MosesTokenizer(lang=lang) self.cache_moses_tokenizer[lang] = moses_tokenizer else: moses_tokenizer = self.cache_moses_tokenizer[lang] return moses_tokenizer.tokenize(text, return_str=False, escape=False) def moses_pipeline(self, text, lang): text = replace_unicode_punct(text) text = 
self.moses_punct_norm(text, lang) text = remove_non_printing_char(text) return text def ja_tokenize(self, text): if self.ja_word_tokenizer is None: try: import Mykytea self.ja_word_tokenizer = Mykytea.Mykytea( f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin" ) except (AttributeError, ImportError): logger.error( "Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper" " (https://github.com/chezou/Mykytea-python) with the following steps" ) logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea") logger.error("2. autoreconf -i") logger.error("3. ./configure --prefix=$HOME/local") logger.error("4. make && make install") logger.error("5. pip install kytea") raise return list(self.ja_word_tokenizer.getWS(text)) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + "</w>",) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + "</w>" while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) if word == "\n </w>": word = "\n</w>" self.cache[token] = word return word def _tokenize(self, text, lang="en", bypass_tokenizer=False): """ Tokenize a string given language code. For Chinese, Japanese and Thai, we use a language specific tokenizer. Otherwise, we use Moses. Details of tokenization: - [sacremoses](https://github.com/alvations/sacremoses): port of Moses - Install with `pip install sacremoses` - [pythainlp](https://github.com/PyThaiNLP/pythainlp): Thai tokenizer - Install with `pip install pythainlp` - [kytea](https://github.com/chezou/Mykytea-python): Japanese tokenizer, wrapper of [KyTea](https://github.com/neubig/kytea) - Install with the following steps: :: git clone git@github.com:neubig/kytea.git && cd kytea autoreconf -i ./configure --prefix=$HOME/local make && make install pip install kytea - [jieba](https://github.com/fxsjy/jieba): Chinese tokenizer (*) - Install with `pip install jieba` (*) The original XLM used [Stanford Segmenter](https://nlp.stanford.edu/software/stanford-segmenter-2018-10-16.zip). However, the wrapper (`nltk.tokenize.stanford_segmenter`) is slow due to JVM overhead, and it will be deprecated. Jieba is a lot faster and pip-installable. Note there is some mismatch with the Stanford Segmenter. It should be fine if you fine-tune the model with Chinese supervisionself. If you want the same exact behaviour, use the original XLM [preprocessing script](https://github.com/facebookresearch/XLM/tree/master/tools) to tokenize the sentence externally, and set `bypass_tokenizer=True` to bypass the tokenizer. Args: - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported languages. However, we don't enforce it. - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) (bool). If True, we only apply BPE. Returns: List of tokens. 
""" if lang and self.lang2id and lang not in self.lang2id: logger.error( "Supplied language code not found in lang2id mapping. Please check that your language is supported by" " the loaded pretrained model." ) if bypass_tokenizer: text = text.split() elif lang not in self.lang_with_custom_tokenizer: text = self.moses_pipeline(text, lang=lang) # TODO: make sure we are using `FacebookAI/xlm-mlm-enro-1024`, since XLM-100 doesn't have this step if lang == "ro": text = romanian_preprocessing(text) text = self.moses_tokenize(text, lang=lang) elif lang == "th": text = self.moses_pipeline(text, lang=lang) try: if "pythainlp" not in sys.modules: from pythainlp.tokenize import word_tokenize as th_word_tokenize else: th_word_tokenize = sys.modules["pythainlp"].word_tokenize except (AttributeError, ImportError): logger.error( "Make sure you install PyThaiNLP (https://github.com/PyThaiNLP/pythainlp) with the following steps" ) logger.error("1. pip install pythainlp") raise text = th_word_tokenize(text) elif lang == "zh": try: if "jieba" not in sys.modules: import jieba else: jieba = sys.modules["jieba"] except (AttributeError, ImportError): logger.error("Make sure you install Jieba (https://github.com/fxsjy/jieba) with the following steps") logger.error("1. pip install jieba") raise text = " ".join(jieba.cut(text)) text = self.moses_pipeline(text, lang=lang) text = text.split() elif lang == "ja": text = self.moses_pipeline(text, lang=lang) text = self.ja_tokenize(text) else: raise ValueError("It should not reach here") if self.do_lowercase_and_remove_accent and not bypass_tokenizer: text = lowercase_and_remove_accent(text) split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(" "))) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = "".join(tokens).replace("</w>", " ").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ bos = [self.bos_token_id] sep = [self.sep_token_id] if token_ids_1 is None: return bos + token_ids_0 + sep return bos + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. 
already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file def __getstate__(self): state = self.__dict__.copy() state["sm"] = None return state def __setstate__(self, d): self.__dict__ = d try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use XLMTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.sm = sacremoses __all__ = ["XLMTokenizer"]
transformers/src/transformers/models/xlm/tokenization_xlm.py/0
{ "file_path": "transformers/src/transformers/models/xlm/tokenization_xlm.py", "repo_id": "transformers", "token_count": 11016 }
148
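A hedged usage sketch of the `<s> A </s> B </s>` layout built by `build_inputs_with_special_tokens` and the segment mask from `create_token_type_ids_from_sequences` in the `tokenization_xlm.py` entry above (the checkpoint name is the standard English-German XLM model and `sacremoses` must be installed):

```python
from transformers import XLMTokenizer

tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-mlm-ende-1024")

encoded = tokenizer("hello world", "guten tag")

# the sequence starts with <s> and each segment is closed by </s>
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
# token_type_ids: 0 for the first segment (and its trailing </s>), 1 for the second
print(encoded["token_type_ids"])
```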
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch XLNet model. """ import warnings from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...generation import GenerationMixin from ...modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_xlnet import XLNetConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "xlnet/xlnet-base-cased" _CONFIG_FOR_DOC = "XLNetConfig" def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. I use a map to keep the PyTorch model as identical to the original PyTorch model as possible. """ tf_to_pt_map = {} if hasattr(model, "transformer"): if hasattr(model, "lm_loss"): # We will load also the output bias tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights: # We will load also the sequence summary tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias if ( hasattr(model, "logits_proj") and config.finetuning_task is not None and f"model/regression_{config.finetuning_task}/logit/kernel" in tf_weights ): tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/kernel"] = model.logits_proj.weight tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/bias"] = model.logits_proj.bias # Now load the rest of the transformer model = model.transformer # Embeddings and output tf_to_pt_map.update( { "model/transformer/word_embedding/lookup_table": model.word_embedding.weight, "model/transformer/mask_emb/mask_emb": model.mask_emb, } ) # Transformer blocks for i, b in enumerate(model.layer): layer_str = f"model/transformer/layer_{i}/" tf_to_pt_map.update( { layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight, layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias, layer_str + "rel_attn/o/kernel": b.rel_attn.o, layer_str + "rel_attn/q/kernel": b.rel_attn.q, layer_str + "rel_attn/k/kernel": b.rel_attn.k, layer_str + "rel_attn/r/kernel": b.rel_attn.r, layer_str + "rel_attn/v/kernel": b.rel_attn.v, layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight, layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias, layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight, layer_str + "ff/layer_1/bias": b.ff.layer_1.bias, layer_str + 
"ff/layer_2/kernel": b.ff.layer_2.weight, layer_str + "ff/layer_2/bias": b.ff.layer_2.bias, } ) # Relative positioning biases if config.untie_r: r_r_list = [] r_w_list = [] r_s_list = [] seg_embed_list = [] for b in model.layer: r_r_list.append(b.rel_attn.r_r_bias) r_w_list.append(b.rel_attn.r_w_bias) r_s_list.append(b.rel_attn.r_s_bias) seg_embed_list.append(b.rel_attn.seg_embed) else: r_r_list = [model.r_r_bias] r_w_list = [model.r_w_bias] r_s_list = [model.r_s_bias] seg_embed_list = [model.seg_embed] tf_to_pt_map.update( { "model/transformer/r_r_bias": r_r_list, "model/transformer/r_w_bias": r_w_list, "model/transformer/r_s_bias": r_s_list, "model/transformer/seg_embed": seg_embed_list, } ) return tf_to_pt_map def load_tf_weights_in_xlnet(model, config, tf_path): """Load tf checkpoints in a pytorch model""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise # Load weights from TF model init_vars = tf.train.list_variables(tf_path) tf_weights = {} for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) tf_weights[name] = array # Build TF to PyTorch weights loading map tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights) for name, pointer in tf_to_pt_map.items(): logger.info(f"Importing {name}") if name not in tf_weights: logger.info(f"{name} not in tf pre-trained weights, skipping") continue array = tf_weights[name] # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name): logger.info("Transposing") array = np.transpose(array) if isinstance(pointer, list): # Here we will split the TF weights assert ( len(pointer) == array.shape[0] ), f"Pointer length {len(pointer)} and array length {array.shape[0]} mismatched" for i, p_i in enumerate(pointer): arr_i = array[i, ...] 
try: assert ( p_i.shape == arr_i.shape ), f"Pointer shape {p_i.shape} and array shape {arr_i.shape} mismatched" except AssertionError as e: e.args += (p_i.shape, arr_i.shape) raise logger.info(f"Initialize PyTorch weight {name} for layer {i}") p_i.data = torch.from_numpy(arr_i) else: try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + "/Adam", None) tf_weights.pop(name + "/Adam_1", None) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}") return model class XLNetRelativeAttention(nn.Module): def __init__(self, config): super().__init__() if config.d_model % config.n_head != 0: raise ValueError( f"The hidden size ({config.d_model}) is not a multiple of the number of attention " f"heads ({config.n_head}" ) self.n_head = config.n_head self.d_head = config.d_head self.d_model = config.d_model self.scale = 1 / (config.d_head**0.5) self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head)) self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.dropout) def prune_heads(self, heads): raise NotImplementedError @staticmethod def rel_shift(x, klen=-1): """perform relative shift to form the relative attention score.""" x_size = x.shape x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3]) x = x[1:, ...] x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3]) # x = x[:, 0:klen, :, :] x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long)) return x @staticmethod def rel_shift_bnij(x, klen=-1): x_size = x.shape x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2]) x = x[:, :, 1:, :] x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1) # Note: the tensor-slice form was faster in my testing than torch.index_select # However, tracing doesn't like the nature of the slice, and if klen changes # during the run then it'll fail, whereas index_select will be fine. 
x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long)) # x = x[:, :, :, :klen] return x def rel_attn_core( self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None, output_attentions=False, ): """Core relative positional attention operations.""" # content based attention score ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h) # position based attention score bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r) bd = self.rel_shift_bnij(bd, klen=ac.shape[3]) # segment based attention score if seg_mat is None: ef = 0 else: ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed) ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef) # merge attention scores and perform masking attn_score = (ac + bd + ef) * self.scale if attn_mask is not None: # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask if attn_mask.dtype == torch.float16: attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask) else: attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask) # attention probability attn_prob = nn.functional.softmax(attn_score, dim=3) attn_prob = self.dropout(attn_prob) # Mask heads if we want to if head_mask is not None: attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask) # attention output attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h) if output_attentions: return attn_vec, torch.einsum("bnij->ijbn", attn_prob) return attn_vec def post_attention(self, h, attn_vec, residual=True): """Post-attention processing.""" # post-attention projection (back to `d_model`) attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o) attn_out = self.dropout(attn_out) if residual: attn_out = attn_out + h output = self.layer_norm(attn_out) return output def forward( self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None, output_attentions=False, ): if g is not None: # Two-stream attention with relative positional encoding. 
# content based attention score if mems is not None and mems.dim() > 1: cat = torch.cat([mems, h], dim=0) else: cat = h # content-based key head k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k) # content-based value head v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v) # position-based key head k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r) # h-stream # content-stream query head q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q) # core attention ops attn_vec_h = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_h, attn_prob_h = attn_vec_h # post processing output_h = self.post_attention(h, attn_vec_h) # g-stream # query-stream query head q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q) # core attention ops if target_mapping is not None: q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping) attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping) else: attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g # post processing output_g = self.post_attention(g, attn_vec_g) if output_attentions: attn_prob = attn_prob_h, attn_prob_g else: # Multi-head attention with relative positional encoding if mems is not None and mems.dim() > 1: cat = torch.cat([mems, h], dim=0) else: cat = h # content heads q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q) k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k) v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v) # positional heads # type casting for fp16 support k_head_r = torch.einsum("ibh,hnd->ibnd", r.type(self.r.dtype), self.r) # core attention ops attn_vec = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec, attn_prob = attn_vec # post processing output_h = self.post_attention(h, attn_vec) output_g = None outputs = (output_h, output_g) if output_attentions: outputs = outputs + (attn_prob,) return outputs class XLNetFeedForward(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.layer_1 = nn.Linear(config.d_model, config.d_inner) self.layer_2 = nn.Linear(config.d_inner, config.d_model) self.dropout = nn.Dropout(config.dropout) if isinstance(config.ff_activation, str): self.activation_function = ACT2FN[config.ff_activation] else: self.activation_function = config.ff_activation def forward(self, inp): output = inp output = self.layer_1(output) output = self.activation_function(output) output = self.dropout(output) output = self.layer_2(output) output = self.dropout(output) output = self.layer_norm(output + inp) return output class XLNetLayer(nn.Module): def __init__(self, config): super().__init__() self.rel_attn = XLNetRelativeAttention(config) self.ff = XLNetFeedForward(config) self.dropout = nn.Dropout(config.dropout) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 def forward( self, output_h, output_g, 
attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None, output_attentions=False, ): outputs = self.rel_attn( output_h, output_g, attn_mask_h, attn_mask_g, r, seg_mat, mems=mems, target_mapping=target_mapping, head_mask=head_mask, output_attentions=output_attentions, ) output_h, output_g = outputs[:2] if output_g is not None: output_g = apply_chunking_to_forward( self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_g ) output_h = apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_h) outputs = (output_h, output_g) + outputs[2:] # Add again attentions if there are there return outputs def ff_chunk(self, output_x): output_x = self.ff(output_x) return output_x class XLNetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = XLNetConfig load_tf_weights = load_tf_weights_in_xlnet base_model_prefix = "transformer" def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, XLNetRelativeAttention): for param in [ module.q, module.k, module.v, module.o, module.r, module.r_r_bias, module.r_s_bias, module.r_w_bias, module.seg_embed, ]: param.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, XLNetModel): module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range) @dataclass class XLNetModelOutput(ModelOutput): """ Output type of [`XLNetModel`]. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
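            When a query stream is active (i.e. `target_mapping` was provided to [`XLNetModel`]), each layer yields
            a pair of attention tensors, one for the content stream and one for the query stream; `hidden_states`
            likewise contains the states of both streams.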
""" last_hidden_state: torch.FloatTensor mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetLMHeadModelOutput(ModelOutput): """ Output type of [`XLNetLMHeadModel`]. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided) Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForSequenceClassificationOutput(ModelOutput): """ Output type of [`XLNetForSequenceClassification`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForTokenClassificationOutput(ModelOutput): """ Output type of [`XLNetForTokenClassificationOutput`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForMultipleChoiceOutput(ModelOutput): """ Output type of [`XLNetForMultipleChoice`]. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForQuestionAnsweringSimpleOutput(ModelOutput): """ Output type of [`XLNetForQuestionAnsweringSimple`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`): Span-end scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForQuestionAnsweringOutput(ModelOutput): """ Output type of [`XLNetForQuestionAnswering`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided): Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top config.start_n_top start token possibilities (beam-search). start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top config.start_n_top start token possibilities (beam-search). end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). 
end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the `is_impossible` label of the answers. mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None XLNET_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`XLNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ XLNET_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (see `mems` output below) . 
Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. `use_mems` has to be set to `True` to make use of `mems`. perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*): Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`: - if `perm_mask[k, i, j] = 0`, i attend to j in batch k; - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k. If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*): Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation). token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) input_mask (`torch.FloatTensor` of shape `{0}`, *optional*): Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in `[0, 1]`: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. You can only uses one of `input_mask` and `attention_mask`. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
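        use_mems (`bool`, *optional*):
            Whether or not to return and update `mems` (see `mems` above and in the model outputs). If left unset,
            defaults to `config.use_mems_train` during training and to `config.use_mems_eval` otherwise.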
""" @add_start_docstrings( "The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.", XLNET_START_DOCSTRING, ) class XLNetModel(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.mem_len = config.mem_len self.reuse_len = config.reuse_len self.d_model = config.d_model self.same_length = config.same_length self.attn_type = config.attn_type self.bi_data = config.bi_data self.clamp_len = config.clamp_len self.n_layer = config.n_layer self.word_embedding = nn.Embedding(config.vocab_size, config.d_model) self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model)) self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)]) self.dropout = nn.Dropout(config.dropout) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.word_embedding def set_input_embeddings(self, new_embeddings): self.word_embedding = new_embeddings def _prune_heads(self, heads_to_prune): raise NotImplementedError def create_mask(self, qlen, mlen): """ Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked. Args: qlen: Sequence length mlen: Mask length :: same_length=False: same_length=True: <mlen > < qlen > <mlen > < qlen > ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1] qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1] [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1] v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0] """ mask = torch.ones((qlen, qlen + mlen), device=self.device) if self.same_length: mask_lo = mask[:, :qlen].tril(-1) mask.triu_(mlen + 1) mask[:, :qlen] += mask_lo else: mask.triu_(mlen + 1) return mask def cache_mem(self, curr_out, prev_mem): # cache hidden states into memory. if self.reuse_len is not None and self.reuse_len > 0: curr_out = curr_out[: self.reuse_len] if self.mem_len is None or self.mem_len == 0: # If `use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time # and returns all of the past and current hidden states. cutoff = 0 else: # If `use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden # states. This is the preferred setting for training and long-form generation. cutoff = -self.mem_len if prev_mem is None: # if `use_mems` is active and `mem_len` is defined, the model new_mem = curr_out[cutoff:] else: new_mem = torch.cat([prev_mem, curr_out], dim=0)[cutoff:] return new_mem.detach() @staticmethod def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq) pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1) pos_emb = pos_emb[:, None, :] if bsz is not None: pos_emb = pos_emb.expand(-1, bsz, -1) return pos_emb def relative_positional_encoding(self, qlen, klen, bsz=None): # create relative positional encoding. 
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.int64).float() inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model)) if self.attn_type == "bi": # beg, end = klen - 1, -qlen beg, end = klen, -qlen elif self.attn_type == "uni": # beg, end = klen - 1, -1 beg, end = klen, -1 else: raise ValueError(f"Unknown `attn_type` {self.attn_type}.") if self.bi_data: fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float() bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.int64).float() if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) if bsz is not None: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2) else: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq) pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1) else: fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float() if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz) return pos_emb @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete after depreciation warning is removed ) -> Union[Tuple, XLNetModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if "use_cache" in kwargs: warnings.warn( "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems`" " instead.", FutureWarning, ) use_mems = kwargs["use_cache"] if self.training: use_mems = use_mems if use_mems is not None else self.config.use_mems_train else: use_mems = use_mems if use_mems is not None else self.config.use_mems_eval # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_ids = input_ids.transpose(0, 1).contiguous() qlen, bsz = input_ids.shape[0], input_ids.shape[1] elif inputs_embeds is not None: inputs_embeds = inputs_embeds.transpose(0, 1).contiguous() qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1] else: raise ValueError("You have to specify either 
input_ids or inputs_embeds") token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0 klen = mlen + qlen dtype_float = self.dtype device = self.device # Attention mask # causal attention mask if self.attn_type == "uni": attn_mask = self.create_mask(qlen, mlen) attn_mask = attn_mask[:, :, None, None] elif self.attn_type == "bi": attn_mask = None else: raise ValueError(f"Unsupported attention type: {self.attn_type}") # data mask: input mask & perm mask assert input_mask is None or attention_mask is None, "You can only use one of input_mask (uses 1 for padding) " "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one." if input_mask is None and attention_mask is not None: input_mask = 1.0 - attention_mask if input_mask is not None and perm_mask is not None: data_mask = input_mask[None] + perm_mask elif input_mask is not None and perm_mask is None: data_mask = input_mask[None] elif input_mask is None and perm_mask is not None: data_mask = perm_mask else: data_mask = None if data_mask is not None: # all mems can be attended to if mlen > 0: mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask) data_mask = torch.cat([mems_mask, data_mask], dim=1) if attn_mask is None: attn_mask = data_mask[:, :, :, None] else: attn_mask += data_mask[:, :, :, None] if attn_mask is not None: attn_mask = (attn_mask > 0).to(dtype_float) if attn_mask is not None: non_tgt_mask = -torch.eye(qlen).to(attn_mask) if mlen > 0: non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1) non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask) else: non_tgt_mask = None # Word embeddings and prepare h & g hidden states if inputs_embeds is not None: word_emb_k = inputs_embeds else: word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k) if target_mapping is not None: word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1) # else: # We removed the inp_q input which was same as target mapping # inp_q_ext = inp_q[:, :, None] # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k output_g = self.dropout(word_emb_q) else: output_g = None # Segment embedding if token_type_ids is not None: # Convert `token_type_ids` to one-hot `seg_mat` if mlen > 0: mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device) cat_ids = torch.cat([mem_pad, token_type_ids], dim=0) else: cat_ids = token_type_ids # `1` indicates not in the same segment [qlen x klen x bsz] seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long() seg_mat = nn.functional.one_hot(seg_mat, num_classes=2).to(dtype_float) else: seg_mat = None # Positional encoding pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz) pos_emb = pos_emb.to(output_h.device) pos_emb = self.dropout(pos_emb) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each 
layer) # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0) head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1) head_mask = head_mask.to( dtype=next(self.parameters()).dtype ) # switch to float if need + fp16 compatibility else: head_mask = [None] * self.n_layer new_mems = () if mems is None: mems = [None] * len(self.layer) attentions = [] if output_attentions else None hidden_states = [] if output_hidden_states else None for i, layer_module in enumerate(self.layer): if use_mems: # cache new mems new_mems = new_mems + (self.cache_mem(output_h, mems[i]),) if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) outputs = layer_module( output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask, r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping, head_mask=head_mask[i], output_attentions=output_attentions, ) output_h, output_g = outputs[:2] if output_attentions: attentions.append(outputs[2]) # Add last hidden state if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) output = self.dropout(output_g if output_g is not None else output_h) # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method) output = output.permute(1, 0, 2).contiguous() if not use_mems: new_mems = None if output_hidden_states: if output_g is not None: hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs) else: hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states) if output_attentions: if target_mapping is not None: # when target_mapping is provided, there are 2-tuple of attentions attentions = tuple( tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions ) else: attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions) if not return_dict: return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None) return XLNetModelOutput( last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions ) @add_start_docstrings( """ XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings). 
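    For autoregressive generation, `prepare_inputs_for_generation` appends a dummy token to the input and builds a
    `perm_mask` / `target_mapping` pair so that, at every step, only this last position is predicted (see the
    examples in [`~XLNetLMHeadModel.forward`]).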
""", XLNET_START_DOCSTRING, ) class XLNetLMHeadModel(XLNetPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_loss.weight"] def __init__(self, config): super().__init__(config) self.attn_type = config.attn_type self.same_length = config.same_length self.transformer = XLNetModel(config) self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_loss def set_output_embeddings(self, new_embeddings): self.lm_loss = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_mems=None, **kwargs): # Overwritten -- this model has unique input preparation # Add dummy token at the end (no attention on this one) effective_batch_size = input_ids.shape[0] dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device) # At every pass, the attention values for the new token and the two last generated tokens # are computed, the rest is reloaded from the `past` cache. A purely auto-regressive model would have # offset = 1; offset = 2 seems to have slightly better computation. offset = 2 if past_key_values: input_ids = torch.cat([input_ids[:, -offset:], dummy_token], dim=1) else: input_ids = torch.cat([input_ids, dummy_token], dim=1) # Build permutation mask so that previous tokens don't see last token sequence_length = input_ids.shape[1] perm_mask = torch.zeros( (effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device ) perm_mask[:, :, -1] = 1.0 # We'll only predict the last token target_mapping = torch.zeros( (effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device ) target_mapping[:, 0, -1] = 1.0 inputs = { "input_ids": input_ids, "perm_mask": perm_mask, "target_mapping": target_mapping, "use_mems": use_mems, } # if past is defined in model kwargs then use it for faster decoding if past_key_values: inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past_key_values) return inputs @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLNetLMHeadModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetLMHeadModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, num_predict)`, *optional*): Labels for masked language modeling. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. The labels should correspond to the masked input words that should be predicted and depends on `target_mapping`. 
Note in order to perform standard auto-regressive language modeling a *<mask>* token has to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below) Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored, the loss is only computed for labels in `[0, ..., config.vocab_size]` Return: Examples: ```python >>> from transformers import AutoTokenizer, XLNetLMHeadModel >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased") >>> model = XLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased") >>> # We show how to setup inputs to predict a next token using a bi-directional context. >>> input_ids = torch.tensor( ... tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False) ... ).unsqueeze( ... 0 ... ) # We will predict the masked token >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float) >>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token >>> target_mapping = torch.zeros( ... (1, 1, input_ids.shape[1]), dtype=torch.float ... ) # Shape [1, 1, seq_length] => let's predict one token >>> target_mapping[ ... 0, 0, -1 ... ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping) >>> next_token_logits = outputs[ ... 0 ... ] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] >>> # The same way can the XLNetLMHeadModel be used to be trained by standard auto-regressive language modeling. >>> input_ids = torch.tensor( ... tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False) ... ).unsqueeze( ... 0 ... ) # We will predict the masked token >>> labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0) >>> assert labels.shape[0] == 1, "only one word will be predicted" >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float) >>> perm_mask[ ... :, :, -1 ... ] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training >>> target_mapping = torch.zeros( ... (1, 1, input_ids.shape[1]), dtype=torch.float ... ) # Shape [1, 1, seq_length] => let's predict one token >>> target_mapping[ ... 0, 0, -1 ... ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels) >>> loss = outputs.loss >>> next_token_logits = ( ... outputs.logits ... 
) # Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) logits = self.lm_loss(transformer_outputs[0]) loss = None if labels is not None: # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetLMHeadModelOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]: """ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every generation step. """ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems] @add_start_docstrings( """ XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XLNET_START_DOCSTRING, ) class XLNetForSequenceClassification(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.transformer = XLNetModel(config) self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForSequenceClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForSequenceClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
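            When `config.problem_type` is not set, it is inferred from `config.num_labels` and the dtype of `labels`
            (regression, single-label or multi-label classification) the first time a loss is computed.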
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) output = transformer_outputs[0] output = self.sequence_summary(output) logits = self.logits_proj(output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForSequenceClassificationOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, XLNET_START_DOCSTRING, ) class XLNetForTokenClassification(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = XLNetModel(config) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForTokenClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForTokenClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where *num_choices* is the size of the second dimension of the input tensors. 
(see *input_ids* above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForTokenClassificationOutput( loss=loss, logits=logits, mems=outputs.mems, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RACE/SWAG tasks. """, XLNET_START_DOCSTRING, ) class XLNetForMultipleChoice(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = XLNetModel(config) self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForMultipleChoiceOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForMultipleChoiceOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) transformer_outputs = self.transformer( flat_input_ids, token_type_ids=flat_token_type_ids, input_mask=flat_input_mask, attention_mask=flat_attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) output = transformer_outputs[0] output = self.sequence_summary(output) logits = self.logits_proj(output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels.view(-1)) if not return_dict: output = (reshaped_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForMultipleChoiceOutput( loss=loss, logits=reshaped_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, XLNET_START_DOCSTRING, ) class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = XLNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForQuestionAnsweringSimpleOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForQuestionAnsweringSimpleOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. 
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return XLNetForQuestionAnsweringSimpleOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, mems=outputs.mems, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
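    Unlike [`XLNetForQuestionAnsweringSimple`], this head follows the original XLNet SQuAD setup: it scores the top
    `config.start_n_top` start positions, the top `config.end_n_top` end positions conditioned on each candidate
    start, and an answerability (`is_impossible`) logit computed from the start and [CLS] representations.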
""", XLNET_START_DOCSTRING, ) class XLNetForQuestionAnswering(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.transformer = XLNetModel(config) self.start_logits = PoolerStartLogits(config) self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLNetForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, is_impossible: Optional[torch.Tensor] = None, cls_index: Optional[torch.Tensor] = None, p_mask: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForQuestionAnsweringOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels whether a question has an answer or no answer (SQuAD 2.0) cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the classification token to use as input for computing plausibility of the answer. p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be masked. 0.0 mean token is not masked. Returns: Example: ```python >>> from transformers import AutoTokenizer, XLNetForQuestionAnswering >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased") >>> model = XLNetForQuestionAnswering.from_pretrained("xlnet/xlnet-base-cased") >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze( ... 0 ... 
) # Batch size 1 >>> start_positions = torch.tensor([1]) >>> end_positions = torch.tensor([3]) >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) >>> loss = outputs.loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) hidden_states = transformer_outputs[0] start_logits = self.start_logits(hidden_states, p_mask=p_mask) outputs = transformer_outputs[1:] # Keep mems, hidden states, attentions if there are in it if start_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: # Predict answerability from the representation of CLS and START cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss total_loss += cls_loss * 0.5 if not return_dict: return (total_loss,) + transformer_outputs[1:] else: return XLNetForQuestionAnsweringOutput( loss=total_loss, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: # during inference, compute the end logits based on beam search bsz, slen, hsz = hidden_states.size() start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen) start_top_log_probs, start_top_index = torch.topk( start_log_probs, self.start_n_top, dim=-1 ) # shape (bsz, start_n_top) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( start_states ) # shape (bsz, slen, start_n_top, hsz) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) end_top_log_probs, end_top_index = torch.topk( end_log_probs, self.end_n_top, dim=1 ) # shape (bsz, end_n_top, start_n_top) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum( "blh,bl->bh", hidden_states, start_log_probs ) # get the 
representation of START as weighted sum of hidden states cls_logits = self.answer_class( hidden_states, start_states=start_states, cls_index=cls_index ) # Shape (batch size,): one single `cls_logits` for each sample if not return_dict: outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) return outputs + transformer_outputs[1:] else: return XLNetForQuestionAnsweringOutput( start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) __all__ = [ "XLNetForMultipleChoice", "XLNetForQuestionAnswering", "XLNetForQuestionAnsweringSimple", "XLNetForSequenceClassification", "XLNetForTokenClassification", "XLNetLMHeadModel", "XLNetModel", "XLNetPreTrainedModel", "load_tf_weights_in_xlnet", ]
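As a complement to the docstring example above, which supplies gold `start_positions`/`end_positions`, the sketch below runs `XLNetForQuestionAnswering` in its inference branch and inspects the beam-search outputs it returns instead of plain per-token logits. The checkpoint name follows the docstring example; the question/context pair is illustrative, and turning the top-k indices into a final answer span is deliberately left to the SQuAD post-processing utilities.

```python
# Illustrative sketch only: inspect the beam-search outputs produced when no span labels are passed.
import torch
from transformers import AutoTokenizer, XLNetForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
model = XLNetForQuestionAnswering.from_pretrained("xlnet/xlnet-base-cased")
model.eval()

inputs = tokenizer("Who wrote the paper?", "The paper was written by Yang et al.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Top start candidates: (batch_size, config.start_n_top)
print(outputs.start_top_log_probs.shape, outputs.start_top_index.shape)
# Top end candidates, one set per start candidate, flattened to
# (batch_size, config.start_n_top * config.end_n_top)
print(outputs.end_top_log_probs.shape, outputs.end_top_index.shape)
# Answerability logit for SQuAD 2.0-style "no answer" decisions: (batch_size,)
print(outputs.cls_logits.shape)
```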
transformers/src/transformers/models/xlnet/modeling_xlnet.py/0
{ "file_path": "transformers/src/transformers/models/xlnet/modeling_xlnet.py", "repo_id": "transformers", "token_count": 41875 }
149
# coding=utf-8 # Copyright 2022 University of Wisconsin-Madison and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch YOSO model.""" import math from pathlib import Path from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_ninja_available, is_torch_cuda_available, logging, ) from .configuration_yoso import YosoConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "uw-madison/yoso-4096" _CONFIG_FOR_DOC = "YosoConfig" lsh_cumulation = None def load_cuda_kernels(): global lsh_cumulation from torch.utils.cpp_extension import load def append_root(files): src_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "yoso" return [src_folder / file for file in files] src_files = append_root(["fast_lsh_cumulation_torch.cpp", "fast_lsh_cumulation.cu", "fast_lsh_cumulation_cuda.cu"]) load("fast_lsh_cumulation", src_files, verbose=True) import fast_lsh_cumulation as lsh_cumulation def to_contiguous(input_tensors): if isinstance(input_tensors, list): out = [] for tensor in input_tensors: if not tensor.is_contiguous(): tensor = tensor.contiguous() out.append(tensor) return out else: if not input_tensors.is_contiguous(): input_tensors = input_tensors.contiguous() return input_tensors def normalize(input_tensors): if isinstance(input_tensors, list): out = [] for tensor in input_tensors: out.append(nn.functional.normalize(tensor, p=2, dim=-1)) return out else: return nn.functional.normalize(input_tensors, p=2, dim=-1) def hashing(query, key, num_hash, hash_len): if len(query.size()) != 3: raise ValueError("Query has incorrect size.") if len(key.size()) != 3: raise ValueError("Key has incorrect size.") rmat = torch.randn(query.size(0), query.size(2), num_hash * hash_len, device=query.device) raise_pow = 2 ** torch.arange(hash_len, device=query.device) query_projection = torch.matmul(query, rmat).reshape(query.size(0), query.size(1), num_hash, hash_len) key_projection = torch.matmul(key, rmat).reshape(key.size(0), key.size(1), num_hash, hash_len) query_binary = (query_projection > 0).int() key_binary = (key_projection > 0).int() query_hash = torch.sum(query_binary * raise_pow, dim=-1) query_hash = torch.sum(key_binary * raise_pow, dim=-1) return query_hash.int(), query_hash.int() class YosoCumulation(torch.autograd.Function): @staticmethod def forward(ctx, query_mask, key_mask, query, key, value, config): hash_code_len = 
config["hash_code_len"] expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :] cumulation_value = torch.matmul(expectation, value) ctx.save_for_backward(query_mask, key_mask, expectation, query, key, value) ctx.config = config return cumulation_value @staticmethod def backward(ctx, grad): grad = to_contiguous(grad) query_mask, key_mask, expectation, query, key, value = ctx.saved_tensors config = ctx.config hash_code_len = config["hash_code_len"] weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key) grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query) grad_value = torch.matmul(expectation.transpose(-1, -2), grad) return None, None, grad_query, grad_key, grad_value, None class YosoLSHCumulation(torch.autograd.Function): @staticmethod def forward(ctx, query_mask, key_mask, query, key, value, config): if query_mask.size(0) != key_mask.size(0): raise ValueError("Query mask and Key mask differ in sizes in dimension 0") if query_mask.size(0) != query.size(0): raise ValueError("Query mask and Query differ in sizes in dimension 0") if query_mask.size(0) != key.size(0): raise ValueError("Query mask and Key differ in sizes in dimension 0") if query_mask.size(0) != value.size(0): raise ValueError("Query mask and Value mask differ in sizes in dimension 0") if key.size(1) != value.size(1): raise ValueError("Key and Value differ in sizes in dimension 1") if query.size(2) != key.size(2): raise ValueError("Query and Key differ in sizes in dimension 2") query_mask, key_mask, query, key, value = to_contiguous([query_mask, key_mask, query, key, value]) use_cuda = query_mask.is_cuda num_hash = config["num_hash"] hash_code_len = config["hash_code_len"] hashtable_capacity = int(2**hash_code_len) if config["use_fast_hash"]: query_hash_code, key_hash_code = lsh_cumulation.fast_hash( query_mask, query, key_mask, key, num_hash, hash_code_len, use_cuda, 1 ) else: query_hash_code, key_hash_code = hashing(query, key, num_hash, hash_code_len) cumulation_value = lsh_cumulation.lsh_cumulation( query_mask, query_hash_code, key_mask, key_hash_code, value, hashtable_capacity, use_cuda, 1 ) ctx.save_for_backward(query_mask, key_mask, query_hash_code, key_hash_code, query, key, value) ctx.config = config return cumulation_value @staticmethod def backward(ctx, grad): grad = to_contiguous(grad) query_mask, key_mask, query_hash_code, key_hash_code, query, key, value = ctx.saved_tensors config = ctx.config use_cuda = grad.is_cuda hash_code_len = config["hash_code_len"] hashtable_capacity = int(2**hash_code_len) if config["lsh_backward"]: grad_value = lsh_cumulation.lsh_cumulation( key_mask, key_hash_code, query_mask, query_hash_code, grad, hashtable_capacity, use_cuda, 1 ) grad_query = lsh_cumulation.lsh_weighted_cumulation( query_mask, query_hash_code, grad, key_mask, key_hash_code, value, (hash_code_len / 2) * key, hashtable_capacity, use_cuda, 4, ) grad_key = lsh_cumulation.lsh_weighted_cumulation( key_mask, key_hash_code, value, query_mask, query_hash_code, grad, (hash_code_len / 2) * query, hashtable_capacity, use_cuda, 4, ) else: expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :] weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation grad_query = 
torch.matmul(weighted_exp, (hash_code_len / 2) * key) grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query) grad_value = torch.matmul(expectation.transpose(-1, -2), grad) return None, None, grad_query, grad_key, grad_value, None # Copied from transformers.models.nystromformer.modeling_nystromformer.NystromformerEmbeddings class YosoEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2, persistent=False ) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class YosoSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) kernel_loaded = lsh_cumulation is not None if is_torch_cuda_available() and is_ninja_available() and not kernel_loaded: try: load_cuda_kernels() except Exception as e: logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}") self.num_attention_heads = config.num_attention_heads 
self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = ( position_embedding_type if position_embedding_type is not None else config.position_embedding_type ) self.use_expectation = config.use_expectation self.hash_code_len = config.hash_code_len self.use_conv = config.conv_window is not None self.use_fast_hash = config.use_fast_hash self.num_hash = config.num_hash self.lsh_backward = config.lsh_backward self.lsh_config = { "hash_code_len": self.hash_code_len, "use_fast_hash": self.use_fast_hash, "num_hash": self.num_hash, "lsh_backward": self.lsh_backward, } if config.conv_window is not None: self.conv = nn.Conv2d( in_channels=config.num_attention_heads, out_channels=config.num_attention_heads, kernel_size=(config.conv_window, 1), padding=(config.conv_window // 2, 0), bias=False, groups=config.num_attention_heads, ) def transpose_for_scores(self, layer): new_layer_shape = layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size) layer = layer.view(*new_layer_shape) return layer.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.use_conv: conv_value_layer = self.conv(value_layer * attention_mask[:, None, :, None]) batch_size, num_heads, seq_len, head_dim = query_layer.size() query_layer = query_layer.reshape(batch_size * num_heads, seq_len, head_dim) key_layer = key_layer.reshape(batch_size * num_heads, seq_len, head_dim) value_layer = value_layer.reshape(batch_size * num_heads, seq_len, head_dim) attention_mask = 1.0 + attention_mask / 10000.0 attention_mask = ( attention_mask.unsqueeze(1) .repeat_interleave(num_heads, dim=1) .reshape(batch_size * num_heads, seq_len) .int() ) # The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). Inputs # smaller than this are padded with zeros. 
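        # Illustration (values not taken from the config above): with hidden_size 768 and 12 heads, head_dim is 64,
        # so no padding is applied; with head_dim 30, query/key/value would each be right-padded below with two
        # zero feature columns to reach the warp width of 32, and that padding is sliced off `context_layer`
        # again after the cumulation call.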
gpu_warp_size = 32 if (not self.use_expectation) and head_dim < gpu_warp_size: pad_size = batch_size * num_heads, seq_len, gpu_warp_size - head_dim query_layer = torch.cat( [ query_layer, torch.zeros(pad_size, device=query_layer.device), ], dim=-1, ) key_layer = torch.cat( [ key_layer, torch.zeros(pad_size, device=key_layer.device), ], dim=-1, ) value_layer = torch.cat( [ value_layer, torch.zeros(pad_size, device=value_layer.device), ], dim=-1, ) if self.use_expectation or self.training: query_layer, key_layer = normalize([query_layer, key_layer]) if self.use_expectation: context_layer = YosoCumulation.apply( attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config ) else: context_layer = YosoLSHCumulation.apply( attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config ) if (not self.use_expectation) and head_dim < gpu_warp_size: context_layer = context_layer[:, :, :head_dim] context_layer = normalize(context_layer) context_layer = context_layer.reshape(batch_size, num_heads, seq_len, head_dim) if self.use_conv: context_layer += conv_value_layer context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, context_layer) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class YosoSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class YosoAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = YosoSelfAttention(config, position_embedding_type=position_embedding_type) self.output = YosoSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_outputs = self.self(hidden_states, attention_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class YosoIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if 
isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class YosoOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class YosoLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = YosoAttention(config) self.add_cross_attention = config.add_cross_attention self.intermediate = YosoIntermediate(config) self.output = YosoOutput(config) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_attention_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class YosoEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([YosoLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform class YosoPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, 
str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Yoso class YosoLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = YosoPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Yoso class YosoOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = YosoLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class YosoPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = YosoConfig base_model_prefix = "yoso" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) YOSO_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`YosoConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ YOSO_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare YOSO Model transformer outputting raw hidden-states without any specific head on top.", YOSO_START_DOCSTRING, ) class YosoModel(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = YosoEmbeddings(config) self.encoder = YosoEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithCrossAttentions( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings("""YOSO Model with a `language modeling` head on top.""", YOSO_START_DOCSTRING) class YosoForMaskedLM(YosoPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): 
super().__init__(config) self.yoso = YosoModel(config) self.cls = YosoOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class YosoClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. 
for GLUE tasks.""", YOSO_START_DOCSTRING, ) class YosoForSequenceClassification(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.yoso = YosoModel(config) self.classifier = YosoClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """YOSO Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. 
for RocStories/SWAG tasks.""", YOSO_START_DOCSTRING, ) class YosoForMultipleChoice(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.yoso = YosoModel(config) self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_state = outputs[0] # (bs * num_choices, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs * num_choices, dim) pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim) pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """YOSO Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.""", YOSO_START_DOCSTRING, ) class YosoForTokenClassification(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.yoso = YosoModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """YOSO Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""", YOSO_START_DOCSTRING, ) class YosoForQuestionAnswering(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.yoso = YosoModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, 
head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "YosoForMaskedLM", "YosoForMultipleChoice", "YosoForQuestionAnswering", "YosoForSequenceClassification", "YosoForTokenClassification", "YosoLayer", "YosoModel", "YosoPreTrainedModel", ]
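The custom autograd functions near the top of this file are the heart of YOSO: instead of softmax attention, `YosoCumulation` weights the values by the expected number of LSH hash collisions between unit-normalized queries and keys. The short sketch below reproduces that expectation in plain PyTorch, with no CUDA kernels and with the padding masks omitted; the tensor sizes and `hash_code_len` are illustrative values, not taken from `YosoConfig`.

```python
# Minimal sketch of the attention weights YosoCumulation.forward computes: the softmax kernel is replaced
# by the collision probability (1 - arccos(q . k) / pi) ** hash_code_len on unit-normalized queries/keys.
import math

import torch
import torch.nn.functional as F

batch, seq_len, head_dim, hash_code_len = 1, 6, 8, 9  # illustrative sizes

query = F.normalize(torch.randn(batch, seq_len, head_dim), p=2, dim=-1)
key = F.normalize(torch.randn(batch, seq_len, head_dim), p=2, dim=-1)
value = torch.randn(batch, seq_len, head_dim)

# Expected hash-collision rate for each query/key pair, used in place of softmax probabilities.
# The clamp is a safety margin for this sketch; the library applies acos to the raw product.
cosine = torch.matmul(query, key.transpose(-1, -2)).clamp(-1.0, 1.0)
expectation = (1 - torch.acos(cosine) / math.pi) ** hash_code_len

context = torch.matmul(expectation, value)  # weighted sum of values, as in YosoCumulation.forward
print(expectation.shape, context.shape)  # torch.Size([1, 6, 6]) torch.Size([1, 6, 8])
```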
transformers/src/transformers/models/yoso/modeling_yoso.py/0
{ "file_path": "transformers/src/transformers/models/yoso/modeling_yoso.py", "repo_id": "transformers", "token_count": 23868 }
150
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from inspect import signature from itertools import chain from pathlib import Path from typing import TYPE_CHECKING, Iterable, List, Tuple, Union import numpy as np from packaging.version import Version, parse from ..tokenization_utils_base import PreTrainedTokenizerBase from ..utils import ( TensorType, is_tf_available, is_torch_available, logging, ) from .config import OnnxConfig if is_torch_available(): from ..modeling_utils import PreTrainedModel if is_tf_available(): from ..modeling_tf_utils import TFPreTrainedModel if TYPE_CHECKING: from ..feature_extraction_utils import FeatureExtractionMixin from ..processing_utils import ProcessorMixin from ..tokenization_utils import PreTrainedTokenizer logger = logging.get_logger(__name__) # pylint: disable=invalid-name # This is the minimal required version to support some ONNX Runtime features ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0") def check_onnxruntime_requirements(minimum_version: Version): """ Check onnxruntime is installed and if the installed version match is recent enough Raises: ImportError: If onnxruntime is not installed or too old version is found """ try: import onnxruntime # Parse the version of the installed onnxruntime ort_version = parse(onnxruntime.__version__) # We require 1.4.0 minimum if ort_version < ORT_QUANTIZE_MINIMUM_VERSION: raise ImportError( f"We found an older version of onnxruntime ({onnxruntime.__version__}) " f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n" "Please update onnxruntime by running `pip install --upgrade onnxruntime`" ) except ImportError: raise ImportError( "onnxruntime doesn't seem to be currently installed. " "Please install the onnxruntime by running `pip install onnxruntime`" " and relaunch the conversion." ) def export_pytorch( preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], model: "PreTrainedModel", config: OnnxConfig, opset: int, output: Path, tokenizer: "PreTrainedTokenizer" = None, device: str = "cpu", ) -> Tuple[List[str], List[str]]: """ Export a PyTorch model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): The preprocessor used for encoding the data. model ([`PreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. device (`str`, *optional*, defaults to `cpu`): The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration. 
""" if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" " `preprocessor` instead.", FutureWarning, ) logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") preprocessor = tokenizer if issubclass(type(model), PreTrainedModel): import torch from torch.onnx import export as onnx_export logger.info(f"Using framework PyTorch: {torch.__version__}") with torch.no_grad(): model.config.return_dict = True model.eval() # Check if we need to override certain configuration item if config.values_override is not None: logger.info(f"Overriding {len(config.values_override)} configuration item(s)") for override_config_key, override_config_value in config.values_override.items(): logger.info(f"\t- {override_config_key} -> {override_config_value}") setattr(model.config, override_config_key, override_config_value) # Ensure inputs match # TODO: Check when exporting QA we provide "is_pair=True" model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.PYTORCH) device = torch.device(device) if device.type == "cuda" and torch.cuda.is_available(): model.to(device) model_inputs_device = {} for k, v in model_inputs.items(): if isinstance(v, Tuple): model_inputs_device[k] = tuple( x.to(device) if isinstance(x, torch.Tensor) else None for x in v ) elif isinstance(v, List): model_inputs_device[k] = [ tuple(x.to(device) if isinstance(x, torch.Tensor) else None for x in t) for t in v ] else: model_inputs_device[k] = v.to(device) model_inputs = model_inputs_device inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys()) onnx_outputs = list(config.outputs.keys()) if not inputs_match: raise ValueError("Model and config inputs doesn't match") config.patch_ops() onnx_export( model, (model_inputs,), f=output.as_posix(), input_names=list(config.inputs.keys()), output_names=onnx_outputs, dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())), do_constant_folding=True, opset_version=opset, ) config.restore_ops() return matched_inputs, onnx_outputs def export_tensorflow( preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin"], model: "TFPreTrainedModel", config: OnnxConfig, opset: int, output: Path, tokenizer: "PreTrainedTokenizer" = None, ) -> Tuple[List[str], List[str]]: """ Export a TensorFlow model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`] or [`FeatureExtractionMixin`]): The preprocessor used for encoding the data. model ([`TFPreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration. """ import onnx import tensorflow as tf import tf2onnx if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and preprocessor to export the model.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. 
Use" " `preprocessor` instead.", FutureWarning, ) logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") preprocessor = tokenizer model.config.return_dict = True # Check if we need to override certain configuration item if config.values_override is not None: logger.info(f"Overriding {len(config.values_override)} configuration item(s)") for override_config_key, override_config_value in config.values_override.items(): logger.info(f"\t- {override_config_key} -> {override_config_value}") setattr(model.config, override_config_key, override_config_value) # Ensure inputs match model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.TENSORFLOW) inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys()) onnx_outputs = list(config.outputs.keys()) input_signature = [ tf.TensorSpec([None] * tensor.ndim, dtype=tensor.dtype, name=key) for key, tensor in model_inputs.items() ] onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset) onnx.save(onnx_model, output.as_posix()) config.restore_ops() return matched_inputs, onnx_outputs def export( preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], model: Union["PreTrainedModel", "TFPreTrainedModel"], config: OnnxConfig, opset: int, output: Path, tokenizer: "PreTrainedTokenizer" = None, device: str = "cpu", ) -> Tuple[List[str], List[str]]: """ Export a Pytorch or TensorFlow model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): The preprocessor used for encoding the data. model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. device (`str`, *optional*, defaults to `cpu`): The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Only PyTorch is supported for export on CUDA devices. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration. """ if not (is_torch_available() or is_tf_available()): raise ImportError( "Cannot convert because neither PyTorch nor TensorFlow are not installed. " "Please install torch or tensorflow first." ) if is_tf_available() and isinstance(model, TFPreTrainedModel) and device == "cuda": raise RuntimeError("`tf2onnx` does not support export on CUDA device.") if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" " `preprocessor` instead.", FutureWarning, ) logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") preprocessor = tokenizer if is_torch_available(): from ..utils import get_torch_version if not config.is_torch_support_available: logger.warning( f"Unsupported PyTorch version for this model. 
Minimum required is {config.torch_onnx_minimum_version}," f" got: {get_torch_version()}" ) if is_torch_available() and issubclass(type(model), PreTrainedModel): return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer, device=device) elif is_tf_available() and issubclass(type(model), TFPreTrainedModel): return export_tensorflow(preprocessor, model, config, opset, output, tokenizer=tokenizer) def validate_model_outputs( config: OnnxConfig, preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], reference_model: Union["PreTrainedModel", "TFPreTrainedModel"], onnx_model: Path, onnx_named_outputs: List[str], atol: float, tokenizer: "PreTrainedTokenizer" = None, ): from onnxruntime import InferenceSession, SessionOptions logger.info("Validating ONNX model...") if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and a preprocessor to validate the model outputs.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" " `preprocessor` instead.", FutureWarning, ) logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") preprocessor = tokenizer # generate inputs with a different batch_size and seq_len that was used for conversion to properly test # dynamic input shapes. if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): reference_model_inputs = config.generate_dummy_inputs( preprocessor, batch_size=config.default_fixed_batch + 1, seq_length=config.default_fixed_sequence + 1, framework=TensorType.PYTORCH, ) else: reference_model_inputs = config.generate_dummy_inputs( preprocessor, batch_size=config.default_fixed_batch + 1, seq_length=config.default_fixed_sequence + 1, framework=TensorType.TENSORFLOW, ) # Create ONNX Runtime session options = SessionOptions() session = InferenceSession(onnx_model.as_posix(), options, providers=["CPUExecutionProvider"]) # Compute outputs from the reference model if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): reference_model.to("cpu") ref_outputs = reference_model(**reference_model_inputs) ref_outputs_dict = {} # We flatten potential collection of outputs (i.e. past_keys) to a flat structure for name, value in ref_outputs.items(): # Overwriting the output name as "present" since it is the name used for the ONNX outputs # ("past_key_values" being taken for the ONNX inputs) if name == "past_key_values": name = "present" if isinstance(value, (list, tuple)): value = config.flatten_output_collection_property(name, value) ref_outputs_dict.update(value) else: ref_outputs_dict[name] = value # Create onnxruntime inputs from the reference model inputs reference_model_inputs_onnxruntime = config.generate_dummy_inputs_onnxruntime(reference_model_inputs) # We flatten potential collection of inputs (i.e. 
past_keys) onnx_inputs = {} for name, value in reference_model_inputs_onnxruntime.items(): if isinstance(value, (list, tuple)): value = config.flatten_output_collection_property(name, value) onnx_inputs.update({tensor_name: pt_tensor.numpy() for tensor_name, pt_tensor in value.items()}) else: onnx_inputs[name] = value.numpy() # Compute outputs from the ONNX model onnx_outputs = session.run(onnx_named_outputs, onnx_inputs) # Check we have a subset of the keys into onnx_outputs against ref_outputs ref_outputs_set, onnx_outputs_set = set(ref_outputs_dict.keys()), set(onnx_named_outputs) if not onnx_outputs_set.issubset(ref_outputs_set): logger.info( f"\t-[x] ONNX model output names {onnx_outputs_set} do not match reference model {ref_outputs_set}" ) raise ValueError( "Outputs doesn't match between reference model and ONNX exported model: " f"{onnx_outputs_set.difference(ref_outputs_set)}" ) else: logger.info(f"\t-[✓] ONNX model output names match reference model ({onnx_outputs_set})") # Check the shape and values match for name, ort_value in zip(onnx_named_outputs, onnx_outputs): if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): ref_value = ref_outputs_dict[name].detach().numpy() else: ref_value = ref_outputs_dict[name].numpy() logger.info(f'\t- Validating ONNX Model output "{name}":') # Shape if not ort_value.shape == ref_value.shape: logger.info(f"\t\t-[x] shape {ort_value.shape} doesn't match {ref_value.shape}") raise ValueError( "Outputs shape doesn't match between reference model and ONNX exported model: " f"Got {ref_value.shape} (reference) and {ort_value.shape} (ONNX)" ) else: logger.info(f"\t\t-[✓] {ort_value.shape} matches {ref_value.shape}") # Values if not np.allclose(ref_value, ort_value, atol=atol): bad_indices = np.logical_not(np.isclose(ref_value, ort_value, atol=atol)) logger.info(f"\t\t-[x] values not close enough (atol: {atol})") raise ValueError( "Outputs values doesn't match between reference model and ONNX exported model: " f"Got max absolute difference of: {np.amax(np.abs(ref_value - ort_value))} for " f"{ref_value[bad_indices]} vs {ort_value[bad_indices]}" ) else: logger.info(f"\t\t-[✓] all values close (atol: {atol})") def ensure_model_and_config_inputs_match( model: Union["PreTrainedModel", "TFPreTrainedModel"], model_inputs: Iterable[str] ) -> Tuple[bool, List[str]]: """ :param model_inputs: :param config_inputs: :return: """ if is_torch_available() and issubclass(type(model), PreTrainedModel): forward_parameters = signature(model.forward).parameters else: forward_parameters = signature(model.call).parameters model_inputs_set = set(model_inputs) # We are fine if config_inputs has more keys than model_inputs forward_inputs_set = set(forward_parameters.keys()) is_ok = model_inputs_set.issubset(forward_inputs_set) # Make sure the input order match (VERY IMPORTANT !!!!) matching_inputs = forward_inputs_set.intersection(model_inputs_set) ordered_inputs = [parameter for parameter in forward_parameters.keys() if parameter in matching_inputs] return is_ok, ordered_inputs
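# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of driving `export` and `validate_model_outputs` by hand.
# The DistilBERT checkpoint and the output filename below are example choices, not
# requirements of this module; any model with an OnnxConfig implementation would do.
if __name__ == "__main__":
    from transformers import AutoModel, AutoTokenizer
    from transformers.models.distilbert import DistilBertOnnxConfig

    model_ckpt = "distilbert-base-uncased"  # example checkpoint
    tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
    model = AutoModel.from_pretrained(model_ckpt)

    onnx_config = DistilBertOnnxConfig(model.config)
    onnx_path = Path("model.onnx")  # `Path` is already imported at the top of this module

    # `default_onnx_opset` and `atol_for_validation` are defaults provided by OnnxConfig.
    onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, onnx_path)
    validate_model_outputs(onnx_config, tokenizer, model, onnx_path, onnx_outputs, onnx_config.atol_for_validation)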
transformers/src/transformers/onnx/convert.py/0
{ "file_path": "transformers/src/transformers/onnx/convert.py", "repo_id": "transformers", "token_count": 7864 }
151
from typing import Any, Dict, List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import Pipeline, build_pipeline_init_args if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import ( MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES, ) logger = logging.get_logger(__name__) Prediction = Dict[str, Any] Predictions = List[Prediction] @add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) class ImageSegmentationPipeline(Pipeline): """ Image segmentation pipeline using any `AutoModelForXXXSegmentation`. This pipeline predicts masks of objects and their classes. Example: ```python >>> from transformers import pipeline >>> segmenter = pipeline(model="facebook/detr-resnet-50-panoptic") >>> segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") >>> len(segments) 2 >>> segments[0]["label"] 'bird' >>> segments[1]["label"] 'bird' >>> type(segments[0]["mask"]) # This is a black and white mask showing where is the bird on the original image. <class 'PIL.Image.Image'> >>> segments[0]["mask"].size (768, 512) ``` This image segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"image-segmentation"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=image-segmentation). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch.") requires_backends(self, "vision") mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES.copy() mapping.update(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES) mapping.update(MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES) mapping.update(MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES) self.check_model_type(mapping) def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} postprocess_kwargs = {} if "subtask" in kwargs: postprocess_kwargs["subtask"] = kwargs["subtask"] preprocess_kwargs["subtask"] = kwargs["subtask"] if "threshold" in kwargs: postprocess_kwargs["threshold"] = kwargs["threshold"] if "mask_threshold" in kwargs: postprocess_kwargs["mask_threshold"] = kwargs["mask_threshold"] if "overlap_mask_area_threshold" in kwargs: postprocess_kwargs["overlap_mask_area_threshold"] = kwargs["overlap_mask_area_threshold"] if "timeout" in kwargs: preprocess_kwargs["timeout"] = kwargs["timeout"] return preprocess_kwargs, {}, postprocess_kwargs def __call__(self, inputs=None, **kwargs) -> Union[Predictions, List[Prediction]]: """ Perform segmentation (detect masks & classes) in the image(s) passed as inputs. Args: inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing an HTTP(S) link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images. subtask (`str`, *optional*): Segmentation task to be performed, choose [`semantic`, `instance` and `panoptic`] depending on model capabilities. 
If not set, the pipeline will attempt to resolve in the following order:
                `panoptic`, `instance`, `semantic`.
            threshold (`float`, *optional*, defaults to 0.9):
                Probability threshold to filter out predicted masks.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold to use when turning the predicted masks into binary values.
            overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5):
                Mask overlap threshold to eliminate small, disconnected segments.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.

        Return:
            A list of dictionaries, or a list of lists of dictionaries containing the result. If the input is a
            single image, returns a list of dictionaries; if the input is a list of several images, returns a list of
            lists of dictionaries corresponding to each image.

            The dictionaries contain the mask, label and score (where applicable) of each detected object and contain
            the following keys:

            - **label** (`str`) -- The class label identified by the model.
            - **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL Image of shape (width, height)
              of the original image. Returns a mask filled with zeros if no object is found.
            - **score** (*optional* `float`) -- Optionally, the confidence score, when the model is capable of
              estimating a confidence for the "object" described by the label and the mask.
        """
        # Once the deprecation of `images` is completed, remove its default `None` value
        if "images" in kwargs:
            inputs = kwargs.pop("images")
        if inputs is None:
            raise ValueError("Cannot call the image-segmentation pipeline without an inputs argument!")
        return super().__call__(inputs, **kwargs)

    def preprocess(self, image, subtask=None, timeout=None):
        image = load_image(image, timeout=timeout)
        target_size = [(image.height, image.width)]
        if self.model.config.__class__.__name__ == "OneFormerConfig":
            if subtask is None:
                kwargs = {}
            else:
                kwargs = {"task_inputs": [subtask]}
            inputs = self.image_processor(images=[image], return_tensors="pt", **kwargs)
            if self.framework == "pt":
                inputs = inputs.to(self.torch_dtype)
            inputs["task_inputs"] = self.tokenizer(
                inputs["task_inputs"],
                padding="max_length",
                max_length=self.model.config.task_seq_len,
                return_tensors=self.framework,
            )["input_ids"]
        else:
            inputs = self.image_processor(images=[image], return_tensors="pt")
            if self.framework == "pt":
                inputs = inputs.to(self.torch_dtype)
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        model_outputs = self.model(**model_inputs)
        model_outputs["target_size"] = target_size
        return model_outputs

    def postprocess(
        self, model_outputs, subtask=None, threshold=0.9, mask_threshold=0.5, overlap_mask_area_threshold=0.5
    ):
        fn = None
        if subtask in {"panoptic", None} and hasattr(self.image_processor, "post_process_panoptic_segmentation"):
            fn = self.image_processor.post_process_panoptic_segmentation
        elif subtask in {"instance", None} and hasattr(self.image_processor, "post_process_instance_segmentation"):
            fn = self.image_processor.post_process_instance_segmentation
        if fn is not None:
            outputs = fn(
                model_outputs,
                threshold=threshold,
                mask_threshold=mask_threshold,
                overlap_mask_area_threshold=overlap_mask_area_threshold,
                target_sizes=model_outputs["target_size"],
            )[0]

            annotation = []
            segmentation = outputs["segmentation"]

            for segment in outputs["segments_info"]:
                mask = (segmentation == segment["id"]) * 255
                mask = Image.fromarray(mask.numpy().astype(np.uint8), mode="L")
                label = self.model.config.id2label[segment["label_id"]]
                score = segment["score"]
                annotation.append({"score": score, "label": label, "mask": mask})

        elif subtask in {"semantic", None} and hasattr(self.image_processor, "post_process_semantic_segmentation"):
            outputs = self.image_processor.post_process_semantic_segmentation(
                model_outputs, target_sizes=model_outputs["target_size"]
            )[0]

            annotation = []
            segmentation = outputs.numpy()
            labels = np.unique(segmentation)

            for label in labels:
                mask = (segmentation == label) * 255
                mask = Image.fromarray(mask.astype(np.uint8), mode="L")
                label = self.model.config.id2label[label]
                annotation.append({"score": None, "label": label, "mask": mask})
        else:
            raise ValueError(f"Subtask {subtask} is not supported for model {type(self.model)}")
        return annotation
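# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal example of calling this pipeline, reusing the checkpoint and image URL
# from the class docstring above; the subtask/threshold values are example choices.
if __name__ == "__main__":
    from transformers import pipeline

    segmenter = pipeline(model="facebook/detr-resnet-50-panoptic")
    segments = segmenter(
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        subtask="panoptic",  # could also be "instance" or "semantic", model permitting
        threshold=0.9,
    )
    for segment in segments:
        # Each entry carries a label, an optional score, and a binary PIL mask.
        print(segment["label"], segment["score"], segment["mask"].size)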
transformers/src/transformers/pipelines/image_segmentation.py/0
{ "file_path": "transformers/src/transformers/pipelines/image_segmentation.py", "repo_id": "transformers", "token_count": 4043 }
152
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import Pipeline, build_pipeline_init_args


logger = logging.get_logger(__name__)


@add_end_docstrings(build_pipeline_init_args(has_feature_extractor=True, has_tokenizer=True))
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero shot audio classification pipeline using `ClapModel`. This pipeline predicts the class of an audio when you
    provide an audio and a set of `candidate_labels`.

    <Tip warning={true}>

    The default `hypothesis_template` is: `"This is a sound of {}."`. Make sure you update it for your usage.

    </Tip>

    Example:
    ```python
    >>> from transformers import pipeline
    >>> from datasets import load_dataset

    >>> dataset = load_dataset("ashraq/esc50")
    >>> audio = next(iter(dataset["train"]["audio"]))["array"]
    >>> classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    >>> classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
    [{'score': 0.9996, 'label': 'Sound of a dog'}, {'score': 0.0004, 'label': 'Sound of vacuum cleaner'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This audio classification pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"zero-shot-audio-classification"`. See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-audio-classification).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        """
        Assign labels to the audio(s) passed as inputs.

        Args:
            audios (`str`, `List[str]`, `np.array` or `List[np.array]`):
                The pipeline handles three types of inputs:

                - A string containing an HTTP link pointing to an audio
                - A string containing a local path to an audio
                - An audio loaded in numpy
            candidate_labels (`List[str]`):
                The candidate labels for this audio. They will be formatted using *hypothesis_template*.
            hypothesis_template (`str`, *optional*, defaults to `"This is a sound of {}"`):
                The format used in conjunction with *candidate_labels* to attempt the audio classification by
                replacing the placeholder with the candidate_labels. Pass "{}" if *candidate_labels* are already
                formatted.

        Return:
            A list of dictionaries containing one entry per proposed label. Each dictionary contains the following
            keys:

            - **label** (`str`) -- One of the suggested *candidate_labels*.
            - **score** (`float`) -- The score attributed by the model to that label. It is a value between 0 and 1,
              computed as the `softmax` of `logits_per_audio`.
""" return super().__call__(audios, **kwargs) def _sanitize_parameters(self, **kwargs): preprocess_params = {} if "candidate_labels" in kwargs: preprocess_params["candidate_labels"] = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] return preprocess_params, {}, {} def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."): if isinstance(audio, str): if audio.startswith("http://") or audio.startswith("https://"): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png audio = requests.get(audio).content else: with open(audio, "rb") as f: audio = f.read() if isinstance(audio, bytes): audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate) if not isinstance(audio, np.ndarray): raise TypeError("We expect a numpy ndarray as input") if len(audio.shape) != 1: raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline") inputs = self.feature_extractor( [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" ) if self.framework == "pt": inputs = inputs.to(self.torch_dtype) inputs["candidate_labels"] = candidate_labels sequences = [hypothesis_template.format(x) for x in candidate_labels] text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True) inputs["text_inputs"] = [text_inputs] return inputs def _forward(self, model_inputs): candidate_labels = model_inputs.pop("candidate_labels") text_inputs = model_inputs.pop("text_inputs") if isinstance(text_inputs[0], UserDict): text_inputs = text_inputs[0] else: # Batching case. text_inputs = text_inputs[0][0] outputs = self.model(**text_inputs, **model_inputs) model_outputs = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_audio, } return model_outputs def postprocess(self, model_outputs): candidate_labels = model_outputs.pop("candidate_labels") logits = model_outputs["logits"][0] if self.framework == "pt": probs = logits.softmax(dim=0) scores = probs.tolist() else: raise ValueError("`tf` framework not supported.") result = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0]) ] return result
transformers/src/transformers/pipelines/zero_shot_audio_classification.py/0
{ "file_path": "transformers/src/transformers/pipelines/zero_shot_audio_classification.py", "repo_id": "transformers", "token_count": 2703 }
153
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from typing import TYPE_CHECKING, Any, Dict, List, Optional

from packaging import version

from .base import HfQuantizer


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

from ..utils import is_accelerate_available, is_fbgemm_gpu_available, is_torch_available, logging
from .quantizers_utils import get_module_from_name


if is_torch_available():
    import torch

logger = logging.get_logger(__name__)


class FbgemmFp8HfQuantizer(HfQuantizer):
    """
    FP8 quantization using fbgemm kernels
    """

    requires_parameters_quantization = True
    requires_calibration = False

    required_packages = ["fbgemm-gpu", "accelerate"]

    def __init__(self, quantization_config, **kwargs):
        super().__init__(quantization_config, **kwargs)
        self.quantization_config = quantization_config

    def validate_environment(self, *args, **kwargs):
        if not is_torch_available() or version.parse(importlib.metadata.version("torch")) < version.parse("2.1.0"):
            raise ImportError(
                "Using fbgemm fp8 quantization requires torch >= 2.1.0. "
                "Please install the latest version of torch (`pip install --upgrade torch`)"
            )
        if not is_fbgemm_gpu_available():
            raise ImportError(
                "Using fbgemm fp8 quantization requires the fbgemm-gpu library. "
                "Please install the latest version of fbgemm-gpu by following: "
                "https://pytorch.org/FBGEMM/fbgemm_gpu-development/InstallationInstructions.html#fbgemm-gpu-install-libraries"
            )

        if not is_accelerate_available("0.32.2"):
            raise ImportError(
                "Loading an FP8 quantized model requires accelerate >= 0.32.2 (`pip install --upgrade accelerate`)"
            )

        if not torch.cuda.is_available():
            raise RuntimeError("Using FP8 quantized models with fbgemm kernels requires a GPU")

        compute_capability = torch.cuda.get_device_capability()
        major, minor = compute_capability
        if major < 9:
            raise ValueError(
                "FP8 quantized models are only supported on GPUs with compute capability >= 9.0 (e.g. H100)"
            )

        device_map = kwargs.get("device_map", None)
        if device_map is None:
            logger.warning_once(
                "You have loaded an FP8 model on CPU and have a CUDA device available, make sure to set "
                "your model on a GPU device in order to run your model. To remove this warning, pass device_map = 'cuda'. "
            )
        else:
            if (
                not self.pre_quantized
                and isinstance(device_map, dict)
                and ("cpu" in device_map.values() or "disk" in device_map.values())
            ):
                raise ValueError(
                    "You are attempting to load an FP8 model with a device_map that contains a CPU or disk device. "
                    "This is not supported when the model is quantized on the fly. "
                    "Please use a quantized checkpoint or remove the CPU or disk device from the device_map."
                )

    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
        if torch_dtype is None:
            torch_dtype = torch.bfloat16
            logger.info(
                "Overriding torch_dtype=%s with `torch_dtype=torch.bfloat16` due to "
                "requirements of `fbgemm-gpu` to enable model loading in fp8. "
                "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers, or pass"
                " torch_dtype=torch.bfloat16 to remove this warning.",
                torch_dtype,
            )
        elif torch_dtype == torch.float16:
            raise ValueError(
                "You cannot use FP8 with torch_dtype=torch.float16. "
                "We recommend passing torch_dtype=torch.bfloat16 instead."
            )
        return torch_dtype

    def check_quantized_param(
        self,
        model: "PreTrainedModel",
        param_value: "torch.Tensor",
        param_name: str,
        state_dict: Dict[str, Any],
        **kwargs,
    ):
        from ..integrations import FbgemmFp8Linear

        module, tensor_name = get_module_from_name(model, param_name)

        if isinstance(module, FbgemmFp8Linear):
            if self.pre_quantized or tensor_name == "bias":
                if tensor_name == "weight" and param_value.dtype != torch.float8_e4m3fn:
                    raise ValueError("Expect quantized weights but got an unquantized weight")
                return False
            else:
                if tensor_name == "weight_scale":
                    raise ValueError("Expect unquantized weights but got a quantized weight_scale")
                return True
        return False

    def create_quantized_param(
        self,
        model: "PreTrainedModel",
        param_value: "torch.Tensor",
        param_name: str,
        target_device: "torch.device",
        state_dict: Dict[str, Any],
        unexpected_keys: Optional[List[str]] = None,
    ):
        """
        Quantizes weights into weight and weight_scale
        """
        new_value, weight_scale = torch.ops.fbgemm.quantize_fp8_per_row(param_value)

        module, tensor_name = get_module_from_name(model, param_name)
        module._buffers[tensor_name] = new_value.to(target_device)
        # to have the right output shape -> (out_features, 1)
        module._buffers["weight_scale"] = weight_scale.view(weight_scale.shape[0], 1).to(target_device)

        if unexpected_keys is not None and param_name in unexpected_keys:
            unexpected_keys.remove(param_name)
        del param_name

    def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
        return model

    def _process_model_before_weight_loading(
        self,
        model: "PreTrainedModel",
        device_map,
        keep_in_fp32_modules: List[str] = [],
        **kwargs,
    ):
        from ..integrations import get_keys_to_not_convert, replace_with_fbgemm_fp8_linear

        self.modules_to_not_convert = get_keys_to_not_convert(model)

        if self.quantization_config.modules_to_not_convert is not None:
            self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert)

        model = replace_with_fbgemm_fp8_linear(
            model,
            modules_to_not_convert=self.modules_to_not_convert,
            quantization_config=self.quantization_config,
            pre_quantized=self.pre_quantized,
        )

        model.config.quantization_config = self.quantization_config

    def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
        from ..integrations import FbgemmFp8Linear

        not_missing_keys = []
        for name, module in model.named_modules():
            if isinstance(module, FbgemmFp8Linear):
                for missing in missing_keys:
                    if (
                        (name in missing or name in f"{prefix}.{missing}")
                        and not missing.endswith(".weight")
                        and not missing.endswith(".bias")
                    ):
                        not_missing_keys.append(missing)
        return [k for k in missing_keys if k not in not_missing_keys]

    def is_serializable(self, safe_serialization=None):
        return True

    @property
    def is_trainable(self) -> bool:
        return False
transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py", "repo_id": "transformers", "token_count": 3522 }
154
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (host all the user fronting encoding methods) Special token mixing (host the special tokens logic) and BatchEncoding (wrap the dictionary of output with special method for the Fast tokenizers) """ import copy import json import os import re import warnings from collections import UserDict from collections.abc import Mapping, Sized from contextlib import contextmanager from dataclasses import dataclass from inspect import isfunction from typing import TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union import numpy as np from packaging import version from . import __version__ from .dynamic_module_utils import custom_object_save from .utils import ( ExplicitEnum, PaddingStrategy, PushToHubMixin, TensorType, add_end_docstrings, add_model_info_to_auto_map, add_model_info_to_custom_pipelines, cached_file, copy_func, download_url, extract_commit_hash, get_json_schema, is_flax_available, is_jax_tensor, is_mlx_available, is_numpy_array, is_offline_mode, is_protobuf_available, is_remote_url, is_tf_available, is_tf_tensor, is_tokenizers_available, is_torch_available, is_torch_device, is_torch_tensor, logging, requires_backends, to_py_obj, ) from .utils.chat_template_utils import _compile_jinja_template, _render_with_assistant_indices from .utils.import_utils import PROTOBUF_IMPORT_ERROR if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax.numpy as jnp # noqa: F401 def import_protobuf_decode_error(error_message=""): if is_protobuf_available(): from google.protobuf.message import DecodeError return DecodeError else: raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message)) if is_tokenizers_available(): from tokenizers import AddedToken from tokenizers import Encoding as EncodingFast else: @dataclass(frozen=False, eq=True) class AddedToken: """ AddedToken represents a token to be added to a Tokenizer An AddedToken can have special options defining the way it should behave. The `normalized` will default to `not special` if it is not specified, similarly to the definition in `tokenizers`. 
""" def __init__( self, content: str, single_word=False, lstrip=False, rstrip=False, special=False, normalized=None ): self.content = content self.single_word = single_word self.lstrip = lstrip self.rstrip = rstrip self.special = special self.normalized = normalized if normalized is not None else not special def __getstate__(self): return self.__dict__ def __str__(self): return self.content @dataclass class EncodingFast: """This is dummy class because without the `tokenizers` library we don't have these objects anyway""" pass logger = logging.get_logger(__name__) VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER # Define type aliases and NamedTuples TextInput = str PreTokenizedInput = List[str] EncodedInput = List[int] TextInputPair = Tuple[str, str] PreTokenizedInputPair = Tuple[List[str], List[str]] EncodedInputPair = Tuple[List[int], List[int]] # Define type aliases for text-related non-text modalities AudioInput = Union["np.ndarray", "torch.Tensor", List["np.ndarray"], List["torch.Tensor"]] # Slow tokenizers used to be saved in three separated files SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" ADDED_TOKENS_FILE = "added_tokens.json" TOKENIZER_CONFIG_FILE = "tokenizer_config.json" CHAT_TEMPLATE_FILE = "chat_template.jinja" # Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file FULL_TOKENIZER_FILE = "tokenizer.json" _re_tokenizer_file = re.compile(r"tokenizer\.(.*)\.json") class TruncationStrategy(ExplicitEnum): """ Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in an IDE. """ ONLY_FIRST = "only_first" ONLY_SECOND = "only_second" LONGEST_FIRST = "longest_first" DO_NOT_TRUNCATE = "do_not_truncate" class CharSpan(NamedTuple): """ Character span in the original string. Args: start (`int`): Index of the first character in the original string. end (`int`): Index of the character following the last character in the original string. """ start: int end: int class TokenSpan(NamedTuple): """ Token span in an encoded string (list of tokens). Args: start (`int`): Index of the first token in the span. end (`int`): Index of the token following the last token in the span. """ start: int end: int class BatchEncoding(UserDict): """ Holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase.__call__`], [`~tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`] and [`~tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc). This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes utility methods to map from word/character space to token space. Args: data (`dict`, *optional*): Dictionary of lists/arrays/tensors returned by the `__call__`/`encode_plus`/`batch_encode_plus` methods ('input_ids', 'attention_mask', etc.). encoding (`tokenizers.Encoding` or `Sequence[tokenizers.Encoding]`, *optional*): If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character space to token space the `tokenizers.Encoding` instance or list of instance (for batches) hold this information. tensor_type (`Union[None, str, TensorType]`, *optional*): You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization. 
prepend_batch_axis (`bool`, *optional*, defaults to `False`):
            Whether or not to add a batch axis when converting to tensors (see `tensor_type` above). Note that this
            parameter only has an effect if the parameter `tensor_type` is set; otherwise it has no effect.
        n_sequences (`Optional[int]`, *optional*):
            The number of sequences used to generate each sample from the batch encoded in this [`BatchEncoding`].
            Can be `None` (unknown), `1` (a single sentence) or `2` (a pair of sentences).
    """

    def __init__(
        self,
        data: Optional[Dict[str, Any]] = None,
        encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
        tensor_type: Union[None, str, TensorType] = None,
        prepend_batch_axis: bool = False,
        n_sequences: Optional[int] = None,
    ):
        super().__init__(data)

        if isinstance(encoding, EncodingFast):
            encoding = [encoding]

        self._encodings = encoding

        if n_sequences is None and encoding is not None and len(encoding):
            n_sequences = encoding[0].n_sequences

        self._n_sequences = n_sequences

        self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis)

    @property
    def n_sequences(self) -> Optional[int]:
        """
        `Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this
        [`BatchEncoding`]. Currently can be one of `None` (unknown), `1` (a single sentence) or `2` (a pair of
        sentences)
        """
        return self._n_sequences

    @property
    def is_fast(self) -> bool:
        """
        `bool`: Indicate whether this [`BatchEncoding`] was generated from the result of a [`PreTrainedTokenizerFast`]
        or not.
        """
        return self._encodings is not None

    def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]:
        """
        If the key is a string, returns the value of the dict associated to `key` ('input_ids', 'attention_mask',
        etc.).

        If the key is an integer, get the `tokenizers.Encoding` for batch item with index `key`.

        If the key is a slice, returns the value of the dict associated to `key` ('input_ids', 'attention_mask',
        etc.) with the constraint of slice.
        """
        if isinstance(item, str):
            return self.data[item]
        elif self._encodings is not None:
            return self._encodings[item]
        elif isinstance(item, slice):
            return {key: self.data[key][item] for key in self.data.keys()}
        else:
            raise KeyError(
                "Invalid key. Only three types of key are available: "
                "(1) string, (2) integers for backend Encoding, and (3) slices for data subsetting."
            )

    def __getattr__(self, item: str):
        try:
            return self.data[item]
        except KeyError:
            raise AttributeError

    def __getstate__(self):
        return {"data": self.data, "encodings": self._encodings}

    def __setstate__(self, state):
        if "data" in state:
            self.data = state["data"]

        if "encodings" in state:
            self._encodings = state["encodings"]

    def keys(self):
        return self.data.keys()

    def values(self):
        return self.data.values()

    def items(self):
        return self.data.items()

    # After this point:
    # Extended properties and methods only available for fast (Rust-based) tokenizers
    # provided by HuggingFace tokenizers library.

    @property
    def encodings(self) -> Optional[List[EncodingFast]]:
        """
        `Optional[List[tokenizers.Encoding]]`: The list of all encodings from the tokenization process. Returns
        `None` if the input was tokenized through a Python (i.e., not a fast) tokenizer.
        """
        return self._encodings

    def tokens(self, batch_index: int = 0) -> List[str]:
        """
        Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion
        to integer indices) at a given batch index (only works for the output of a fast tokenizer).

        Args:
            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
Returns: `List[str]`: The list of tokens at that index. """ if not self._encodings: raise ValueError( "tokens() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" " class)." ) return self._encodings[batch_index].tokens def sequence_ids(self, batch_index: int = 0) -> List[Optional[int]]: """ Return a list mapping the tokens to the id of their original sentences: - `None` for special tokens added around or between sequences, - `0` for tokens corresponding to words in the first sequence, - `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly encoded. Args: batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. Returns: `List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding sequence. """ if not self._encodings: raise ValueError( "sequence_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" " class)." ) return self._encodings[batch_index].sequence_ids def words(self, batch_index: int = 0) -> List[Optional[int]]: """ Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer. Args: batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. Returns: `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word). """ if not self._encodings: raise ValueError( "words() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" " class)." ) warnings.warn( "`BatchEncoding.words()` property is deprecated and should be replaced with the identical, " "but more self-explanatory `BatchEncoding.word_ids()` property.", FutureWarning, ) return self.word_ids(batch_index) def word_ids(self, batch_index: int = 0) -> List[Optional[int]]: """ Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer. Args: batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. Returns: `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word). """ if not self._encodings: raise ValueError( "word_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" " class)." ) return self._encodings[batch_index].word_ids def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int: """ Get the index of the sequence represented by the given token. In the general use case, this method returns `0` for a single sequence or the first sequence of a pair, and `1` for the second sequence of a pair Can be called as: - `self.token_to_sequence(token_index)` if batch size is 1 - `self.token_to_sequence(batch_index, token_index)` if batch size is greater than 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_token_index (`int`): Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the token in the sequence. token_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the sequence. Returns: `int`: Index of the word in the input sequence. """ if not self._encodings: raise ValueError("token_to_sequence() is not available when using Python based tokenizers") if token_index is not None: batch_index = batch_or_token_index else: batch_index = 0 token_index = batch_or_token_index if batch_index < 0: batch_index = self._batch_size + batch_index if token_index < 0: token_index = self._seq_len + token_index return self._encodings[batch_index].token_to_sequence(token_index) def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int: """ Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch. Can be called as: - `self.token_to_word(token_index)` if batch size is 1 - `self.token_to_word(batch_index, token_index)` if batch size is greater than 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_token_index (`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence. token_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the sequence. Returns: `int`: Index of the word in the input sequence. """ if not self._encodings: raise ValueError("token_to_word() is not available when using Python based tokenizers") if token_index is not None: batch_index = batch_or_token_index else: batch_index = 0 token_index = batch_or_token_index if batch_index < 0: batch_index = self._batch_size + batch_index if token_index < 0: token_index = self._seq_len + token_index return self._encodings[batch_index].token_to_word(token_index) def word_to_tokens( self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0 ) -> Optional[TokenSpan]: """ Get the encoded token span corresponding to a word in a sequence of the batch. Token spans are returned as a [`~tokenization_utils_base.TokenSpan`] with: - **start** -- Index of the first token. - **end** -- Index of the token following the last token. Can be called as: - `self.word_to_tokens(word_index, sequence_index: int = 0)` if batch size is 1 - `self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)` if batch size is greater or equal to 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_word_index (`int`): Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the word in the sequence. word_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the sequence. 
sequence_index (`int`, *optional*, defaults to 0): If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to. Returns: ([`~tokenization_utils_base.TokenSpan`], *optional*): Span of tokens in the encoded sequence. Returns `None` if no tokens correspond to the word. This can happen especially when the token is a special token that has been used to format the tokenization. For example when we add a class token at the very beginning of the tokenization. """ if not self._encodings: raise ValueError("word_to_tokens() is not available when using Python based tokenizers") if word_index is not None: batch_index = batch_or_word_index else: batch_index = 0 word_index = batch_or_word_index if batch_index < 0: batch_index = self._batch_size + batch_index if word_index < 0: word_index = self._seq_len + word_index span = self._encodings[batch_index].word_to_tokens(word_index, sequence_index) return TokenSpan(*span) if span is not None else None def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan: """ Get the character span corresponding to an encoded token in a sequence of the batch. Character spans are returned as a [`~tokenization_utils_base.CharSpan`] with: - **start** -- Index of the first character in the original string associated to the token. - **end** -- Index of the character following the last character in the original string associated to the token. Can be called as: - `self.token_to_chars(token_index)` if batch size is 1 - `self.token_to_chars(batch_index, token_index)` if batch size is greater or equal to 1 Args: batch_or_token_index (`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence. token_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the token or tokens in the sequence. Returns: [`~tokenization_utils_base.CharSpan`]: Span of characters in the original string, or None, if the token (e.g. <s>, </s>) doesn't correspond to any chars in the origin string. """ if not self._encodings: raise ValueError("token_to_chars() is not available when using Python based tokenizers") if token_index is not None: batch_index = batch_or_token_index else: batch_index = 0 token_index = batch_or_token_index span_indices = self._encodings[batch_index].token_to_chars(token_index) return CharSpan(*span_indices) if span_indices is not None else None def char_to_token( self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0 ) -> int: """ Get the index of the token in the encoded output comprising a character in the original string for a sequence of the batch. Can be called as: - `self.char_to_token(char_index)` if batch size is 1 - `self.char_to_token(batch_index, char_index)` if batch size is greater or equal to 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_char_index (`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the word in the sequence char_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the sequence. 
sequence_index (`int`, *optional*, defaults to 0): If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to. Returns: `int`: Index of the token, or None if the char index refers to a whitespace only token and whitespace is trimmed with `trim_offsets=True`. """ if not self._encodings: raise ValueError("char_to_token() is not available when using Python based tokenizers") if char_index is not None: batch_index = batch_or_char_index else: batch_index = 0 char_index = batch_or_char_index return self._encodings[batch_index].char_to_token(char_index, sequence_index) def word_to_chars( self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0 ) -> CharSpan: """ Get the character span in the original string corresponding to given word in a sequence of the batch. Character spans are returned as a CharSpan NamedTuple with: - start: index of the first character in the original string - end: index of the character following the last character in the original string Can be called as: - `self.word_to_chars(word_index)` if batch size is 1 - `self.word_to_chars(batch_index, word_index)` if batch size is greater or equal to 1 Args: batch_or_word_index (`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the word in the sequence word_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the sequence. sequence_index (`int`, *optional*, defaults to 0): If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to. Returns: `CharSpan` or `List[CharSpan]`: Span(s) of the associated character or characters in the string. CharSpan are NamedTuple with: - start: index of the first character associated to the token in the original string - end: index of the character following the last character associated to the token in the original string """ if not self._encodings: raise ValueError("word_to_chars() is not available when using Python based tokenizers") if word_index is not None: batch_index = batch_or_word_index else: batch_index = 0 word_index = batch_or_word_index return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index))) def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0) -> int: """ Get the word in the original string corresponding to a character in the original string of a sequence of the batch. Can be called as: - `self.char_to_word(char_index)` if batch size is 1 - `self.char_to_word(batch_index, char_index)` if batch size is greater than 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_char_index (`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the character in the original string. char_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the character in the original string. sequence_index (`int`, *optional*, defaults to 0): If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to. 
Returns: `int` or `List[int]`: Index or indices of the associated encoded token(s). """ if not self._encodings: raise ValueError("char_to_word() is not available when using Python based tokenizers") if char_index is not None: batch_index = batch_or_char_index else: batch_index = 0 char_index = batch_or_char_index return self._encodings[batch_index].char_to_word(char_index, sequence_index) def convert_to_tensors( self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False ): """ Convert the inner content to tensors. Args: tensor_type (`str` or [`~utils.TensorType`], *optional*): The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If `None`, no modification is done. prepend_batch_axis (`int`, *optional*, defaults to `False`): Whether or not to add the batch dimension during the conversion. """ if tensor_type is None: return self # Convert to TensorType if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." ) import tensorflow as tf as_tensor = tf.constant is_tensor = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") import torch is_tensor = torch.is_tensor def as_tensor(value, dtype=None): if isinstance(value, list) and isinstance(value[0], np.ndarray): return torch.from_numpy(np.array(value)) return torch.tensor(value) elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.") import jax.numpy as jnp # noqa: F811 as_tensor = jnp.array is_tensor = is_jax_tensor elif tensor_type == TensorType.MLX: if not is_mlx_available(): raise ImportError("Unable to convert output to MLX tensors format, MLX is not installed.") import mlx.core as mx as_tensor = mx.array def is_tensor(obj): return isinstance(obj, mx.array) else: def as_tensor(value, dtype=None): if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)): value_lens = [len(val) for val in value] if len(set(value_lens)) > 1 and dtype is None: # we have a ragged list so handle explicitly value = as_tensor([np.asarray(val) for val in value], dtype=object) return np.asarray(value, dtype=dtype) is_tensor = is_numpy_array # Do the tensor conversion in batch for key, value in self.items(): try: if prepend_batch_axis: value = [value] if not is_tensor(value): tensor = as_tensor(value) # Removing this for now in favor of controlling the shape with `prepend_batch_axis` # # at-least2d # if tensor.ndim > 2: # tensor = tensor.squeeze(0) # elif tensor.ndim < 2: # tensor = tensor[None, :] self[key] = tensor except Exception as e: if key == "overflowing_tokens": raise ValueError( "Unable to create tensor returning overflowing tokens of different lengths. " "Please see if a fast version of this tokenizer is available to have this feature available." ) from e raise ValueError( "Unable to create tensor, you should probably activate truncation and/or padding with" " 'padding=True' 'truncation=True' to have batched tensors with the same length. Perhaps your" f" features (`{key}` in this case) have excessive nesting (inputs type `list` where type `int` is" " expected)." 
) from e return self def to(self, device: Union[str, "torch.device"], *, non_blocking: bool = False) -> "BatchEncoding": """ Send all values to device by calling `v.to(device, non_blocking=non_blocking)` (PyTorch only). Args: device (`str` or `torch.device`): The device to put the tensors on. non_blocking (`bool`): Whether to perform the copy asynchronously. Returns: [`BatchEncoding`]: The same instance after modification. """ requires_backends(self, ["torch"]) import torch # This check catches things like APEX blindly calling "to" on all inputs to a module # Otherwise it passes the casts down and casts the LongTensor containing the token idxs # into a HalfTensor if isinstance(device, str) or is_torch_device(device) or isinstance(device, int): self.data = { k: v.to(device=device, non_blocking=non_blocking) if isinstance(v, torch.Tensor) else v for k, v in self.data.items() } else: logger.warning(f"Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.") return self class SpecialTokensMixin: """ A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to special tokens. In particular, this class hold the attributes which can be used to directly access these special tokens in a model-independent manner and allow to set and update the special tokens. Args: bos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the beginning of a sentence. eos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the end of a sentence. unk_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing an out-of-vocabulary token. sep_token (`str` or `tokenizers.AddedToken`, *optional*): A special token separating two different sentences in the same input (used by BERT for instance). pad_token (`str` or `tokenizers.AddedToken`, *optional*): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. cls_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the class of the input (used by BERT for instance). mask_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): A tuple or a list of additional tokens, which will be marked as `special`, meaning that they will be skipped when decoding if `skip_special_tokens` is set to `True`. """ SPECIAL_TOKENS_ATTRIBUTES = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", "additional_special_tokens", ] def __init__(self, verbose=False, **kwargs): self._pad_token_type_id = 0 self.verbose = verbose self._special_tokens_map = {attr: None for attr in self.SPECIAL_TOKENS_ATTRIBUTES} self._special_tokens_map["additional_special_tokens"] = [] # for BC where it defaults to empty list # We directly set the hidden value to allow initialization with special tokens # which are not yet in the vocabulary. 
Necessary for serialization/de-serialization
        # TODO clean this up at some point (probably by switching to fast tokenizers)

        for key, value in kwargs.items():
            if value is None:
                continue
            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
                if key == "additional_special_tokens":
                    assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple"
                    assert all(
                        isinstance(t, (str, AddedToken)) for t in value
                    ), "One of the tokens is not a string or an AddedToken"
                    setattr(self, key, value)
                elif isinstance(value, (str, AddedToken)):
                    setattr(self, key, value)
                else:
                    raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}")

    def sanitize_special_tokens(self) -> int:
        """
        The `sanitize_special_tokens` method is deprecated, kept only for backward compatibility, and will be removed
        in transformers v5.
        """
        logger.warning_once("The `sanitize_special_tokens` will be removed in transformers v5.")
        return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)

    def add_special_tokens(
        self, special_tokens_dict: Dict[str, Union[str, AddedToken]], replace_additional_special_tokens=True
    ) -> int:
        """
        Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
        special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
        current vocabulary).

        When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of
        the model so that its embedding matrix matches the tokenizer. In order to do that, please use the
        [`~PreTrainedModel.resize_token_embeddings`] method.

        Using `add_special_tokens` will ensure your special tokens can be used in several ways:

        - Special tokens can be skipped when decoding using `skip_special_tokens = True`.
        - Special tokens are carefully handled by the tokenizer (they are never split), similar to `AddedTokens`.
        - You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This
          makes it easy to develop model-agnostic training and fine-tuning scripts.

        When possible, special tokens are already registered for provided pretrained models (for instance
        [`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be
        `'</s>'`).

        Args:
            special_tokens_dict (dictionary *str* to *str* or `tokenizers.AddedToken`):
                Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`,
                `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`].

                Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
                assigns the index of the `unk_token` to them).
            replace_additional_special_tokens (`bool`, *optional*, defaults to `True`):
                If `True`, the existing list of additional special tokens will be replaced by the list provided in
                `special_tokens_dict`. Otherwise, `self._special_tokens_map["additional_special_tokens"]` is just
                extended. In the former case, the tokens will NOT be removed from the tokenizer's full vocabulary -
                they are only being flagged as non-special tokens. Remember, this only affects which tokens are
                skipped during decoding, not the `added_tokens_encoder` and `added_tokens_decoder`. This means that
                the previous `additional_special_tokens` are still added tokens, and will not be split by the model.

        Returns:
            `int`: Number of tokens added to the vocabulary.
Examples: ```python # Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") model = GPT2Model.from_pretrained("openai-community/gpt2") special_tokens_dict = {"cls_token": "<CLS>"} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. model.resize_token_embeddings(len(tokenizer)) assert tokenizer.cls_token == "<CLS>" ```""" if not special_tokens_dict: return 0 added_tokens = [] for key, value in special_tokens_dict.items(): assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token" if self.verbose: logger.info(f"Assigning {value} to the {key} key of the tokenizer") if key == "additional_special_tokens": assert isinstance(value, (list, tuple)) and all( isinstance(t, (str, AddedToken)) for t in value ), f"Tokens {value} for key {key} should all be str or AddedToken instances" to_add = [] for token in value: if isinstance(token, str): # for legacy purpose we default to stripping. `test_add_tokens_tokenizer` depends on this token = AddedToken(token, rstrip=False, lstrip=False, normalized=False, special=True) if not replace_additional_special_tokens and str(token) in self.additional_special_tokens: continue to_add.append(token) if replace_additional_special_tokens and len(to_add) > 0: setattr(self, key, list(to_add)) else: self._special_tokens_map["additional_special_tokens"].extend(to_add) added_tokens += to_add else: if not isinstance(value, (str, AddedToken)): raise ValueError(f"Token {value} for key {key} should be a str or an AddedToken instance") if isinstance(value, (str)): # for legacy purpose we default to stripping. `False` depends on this value = AddedToken(value, rstrip=False, lstrip=False, normalized=False, special=True) if isinstance(value, AddedToken): setattr(self, key, value) if value not in added_tokens: added_tokens.append(value) # if we are adding tokens that were not part of the vocab, we ought to add them added_tokens = self.add_tokens(added_tokens, special_tokens=True) return added_tokens def add_tokens( self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False ) -> int: """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary and will be isolated before the tokenization algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore not treated in the same way. Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer. In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. Args: new_tokens (`str`, `tokenizers.AddedToken` or a list of *str* or `tokenizers.AddedToken`): Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string token to let you personalize its behavior: whether this token should only match against a single word, whether this token should strip all potential whitespaces on the left side, whether this token should strip all potential whitespaces on the right side, etc. special_tokens (`bool`, *optional*, defaults to `False`): Can be used to specify if the token is a special token. 
This mostly change the normalization behavior (special tokens like CLS or [MASK] are usually not lower-cased for instance). See details for `tokenizers.AddedToken` in HuggingFace tokenizers library. Returns: `int`: Number of tokens added to the vocabulary. Examples: ```python # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased") model = BertModel.from_pretrained("google-bert/bert-base-uncased") num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"]) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. model.resize_token_embeddings(len(tokenizer)) ```""" if not new_tokens: return 0 if not isinstance(new_tokens, (list, tuple)): new_tokens = [new_tokens] return self._add_tokens(new_tokens, special_tokens=special_tokens) def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int: raise NotImplementedError @property def pad_token_type_id(self) -> int: """ `int`: Id of the padding token type in the vocabulary. """ return self._pad_token_type_id def __setattr__(self, key, value): key_without_id = key key_is_special_id = key.endswith("_id") or key.endswith("_ids") if key_is_special_id: key_without_id = key[:-3] if not key.endswith("_ids") else key[:-4] if self.__dict__.get("_special_tokens_map", None) is not None and any( name in self.__dict__["_special_tokens_map"] for name in [key, key_without_id] ): if key_is_special_id: if value is not None: value = ( self.convert_ids_to_tokens(value) if key != "additional_special_tokens" else [self.convert_ids_to_tokens(val) for val in value] ) key = key_without_id if key != "additional_special_tokens" and not isinstance(value, (str, AddedToken)) and value is not None: raise ValueError(f"Cannot set a non-string value as the {key}") self._special_tokens_map[key] = value else: super().__setattr__(key, value) def __getattr__(self, key): key_without_id = key key_is_special_id = key.endswith("_id") or key.endswith("_ids") if key_is_special_id: key_without_id = key[:-3] if not key.endswith("_ids") else key[:-4] if self.__dict__.get("_special_tokens_map", None) is not None and any( name in self.__dict__["_special_tokens_map"] for name in [key, key_without_id] ): _special_tokens_map = self.__dict__["_special_tokens_map"] if not key_is_special_id: if _special_tokens_map[key] is None: if self.verbose: logger.error(f"Using {key}, but it is not set yet.") return None value = _special_tokens_map[key] return str(value) if key != "additional_special_tokens" else [str(tok) for tok in value] else: attr_as_tokens = getattr(self, key_without_id) return self.convert_tokens_to_ids(attr_as_tokens) if attr_as_tokens is not None else None if key not in self.__dict__: raise AttributeError(f"{self.__class__.__name__} has no attribute {key}") else: return super().__getattr__(key) @property def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]: """ `Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.). Convert potential tokens of `tokenizers.AddedToken` type to string. 
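        Example (a minimal sketch; the exact mapping depends on the loaded checkpoint):

        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        tokenizer.special_tokens_map
        # {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]', 'cls_token': '[CLS]', 'mask_token': '[MASK]'}
        ```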
""" set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = getattr(self, attr) if attr_value: set_attr[attr] = attr_value return set_attr @property def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]: """ `Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.). Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. """ set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = self._special_tokens_map[attr] if attr_value: set_attr[attr] = attr_value return set_attr @property def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]: """ `List[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.), the order has nothing to do with the index of each tokens. If you want to know the correct indices, check `self.added_tokens_encoder`. We can't create an order anymore as the keys are `AddedTokens` and not `Strings`. Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. """ all_tokens = [] seen = set() for value in self.special_tokens_map_extended.values(): if isinstance(value, (list, tuple)): tokens_to_add = [token for token in value if str(token) not in seen] else: tokens_to_add = [value] if str(value) not in seen else [] seen.update(map(str, tokens_to_add)) all_tokens.extend(tokens_to_add) return all_tokens @property def all_special_tokens(self) -> List[str]: """ `List[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.). Convert tokens of `tokenizers.AddedToken` type to string. """ all_toks = [str(s) for s in self.all_special_tokens_extended] return all_toks @property def all_special_ids(self) -> List[int]: """ `List[int]`: List the ids of the special tokens(`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes. """ all_toks = self.all_special_tokens all_ids = self.convert_tokens_to_ids(all_toks) return all_ids def _set_model_specific_special_tokens(self, special_tokens: List[str]): """ Adds new special tokens to the "SPECIAL_TOKENS_ATTRIBUTES" list which will be part of "self.special_tokens" and saved as a special token in tokenizer's config. This allows us to dynamically add new model-type specific tokens after initilizing the tokenizer. For example: if the model tokenizers is multimodal, we can support special image or audio tokens. """ self.SPECIAL_TOKENS_ATTRIBUTES = self.SPECIAL_TOKENS_ATTRIBUTES + list(special_tokens.keys()) for key, value in special_tokens.items(): if isinstance(value, (str, AddedToken)): self._special_tokens_map[key] = value else: raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}") ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to add special tokens when encoding the sequences. This will use the underlying `PretrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are automatically added to the input ids. This is usefull if you want to add `bos` or `eos` tokens automatically. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. 
Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. is_split_into_words (`bool`, *optional*, defaults to `False`): Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. 
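            Example of combining `padding`, `truncation` and `max_length` (a minimal sketch; any tokenizer whose
            checkpoint defines a padding token will work):

            ```python
            batch = tokenizer(
                ["a short sentence", "a noticeably longer sentence that will likely be truncated"],
                padding="longest",
                truncation=True,
                max_length=16,
                return_tensors="np",
            )
            ```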
""" ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`) """ INIT_TOKENIZER_DOCSTRING = r""" Class attributes (overridden by derived classes) - **vocab_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string). - **pretrained_vocab_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the associated pretrained vocabulary file. 
- **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model. - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied. Should be `'right'` or `'left'`. - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation applied. Should be `'right'` or `'left'`. Args: model_max_length (`int`, *optional*): The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will default to VERY_LARGE_INTEGER (`int(1e30)`). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. truncation_side (`str`, *optional*): The side on which the model should have truncation applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. chat_template (`str`, *optional*): A Jinja template string that will be used to format lists of chat messages. See https://huggingface.co/docs/transformers/chat_templating for a full description. model_input_names (`List[string]`, *optional*): The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or `"attention_mask"`). Default value is picked from the class attribute of the same name. bos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the beginning of a sentence. Will be associated to `self.bos_token` and `self.bos_token_id`. eos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the end of a sentence. Will be associated to `self.eos_token` and `self.eos_token_id`. unk_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing an out-of-vocabulary token. Will be associated to `self.unk_token` and `self.unk_token_id`. sep_token (`str` or `tokenizers.AddedToken`, *optional*): A special token separating two different sentences in the same input (used by BERT for instance). Will be associated to `self.sep_token` and `self.sep_token_id`. pad_token (`str` or `tokenizers.AddedToken`, *optional*): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. Will be associated to `self.pad_token` and `self.pad_token_id`. cls_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the class of the input (used by BERT for instance). Will be associated to `self.cls_token` and `self.cls_token_id`. mask_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). Will be associated to `self.mask_token` and `self.mask_token_id`. additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): A tuple or a list of additional special tokens. Add them here to ensure they are skipped when decoding with `skip_special_tokens` is set to True. If they are not part of the vocabulary, they will be added at the end of the vocabulary. 
    clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
        Whether or not the model should clean up the spaces that were added when splitting the input text during the
        tokenization process.
    split_special_tokens (`bool`, *optional*, defaults to `False`):
        Whether or not the special tokens should be split during the tokenization process. Passing this argument
        affects the tokenizer's internal state. The default behavior is to not split special tokens. This means that
        if `<s>` is the `bos_token`, then `tokenizer.tokenize("<s>") = ['<s>']`. Otherwise, if
        `split_special_tokens=True`, then `tokenizer.tokenize("<s>")` will give `['<', 's', '>']`.
"""


@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
    """
    Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`].

    Handles shared (mostly boilerplate) methods for those two classes.
    """

    vocab_files_names: Dict[str, str] = {}
    pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
    _auto_class: Optional[str] = None

    # first name has to correspond to main model input name
    # to make sure `tokenizer.pad(...)` works correctly
    model_input_names: List[str] = ["input_ids", "token_type_ids", "attention_mask"]
    padding_side: str = "right"
    truncation_side: str = "right"
    slow_tokenizer_class = None

    def __init__(self, **kwargs):
        # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
        self.init_inputs = ()
        for key in kwargs:
            if hasattr(self, key) and callable(getattr(self, key)):
                raise AttributeError(f"{key} conflicts with the method {key} in {self.__class__.__name__}")

        self.init_kwargs = copy.deepcopy(kwargs)
        self.name_or_path = kwargs.pop("name_or_path", "")
        self._processor_class = kwargs.pop("processor_class", None)

        # For backward compatibility we fallback to set model_max_length from max_len if provided
        model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None))
        self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER

        # Padding and truncation side are right by default and overridden in subclasses. If specified in the kwargs,
        # it is changed.
        self.padding_side = kwargs.pop("padding_side", self.padding_side)
        if self.padding_side not in ["right", "left"]:
            raise ValueError(
                f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
            )

        self.truncation_side = kwargs.pop("truncation_side", self.truncation_side)
        if self.truncation_side not in ["right", "left"]:
            raise ValueError(
                f"Truncation side should be selected between 'right' and 'left', current value: {self.truncation_side}"
            )

        self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)

        # By default, tokenization spaces are not cleaned up for either fast or slow tokenizers
        self.clean_up_tokenization_spaces = kwargs.pop("clean_up_tokenization_spaces", False)

        # By default, do not split special tokens for both fast and slow tokenizers
        self.split_special_tokens = kwargs.pop("split_special_tokens", False)

        self.deprecation_warnings = {}  # Use to store when we have already noticed a deprecation warning (avoid overlogging).
        self._in_target_context_manager = False

        # Stores a Jinja template that formats chat histories into tokenizable strings
        self.chat_template = kwargs.pop("chat_template", None)
        if isinstance(self.chat_template, (list, tuple)):
            # Chat templates are stored as lists of dicts with fixed key names,
            # we reconstruct that into a single dict while loading them.
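            # For example (illustrative shape, not tied to any particular checkpoint), a serialized
            #   [{"name": "default", "template": "..."}, {"name": "tool_use", "template": "..."}]
            # becomes {"default": "...", "tool_use": "..."} so templates can be looked up by name.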
self.chat_template = {template["name"]: template["template"] for template in self.chat_template} super().__init__(**kwargs) self.extra_special_tokens = kwargs.pop("extra_special_tokens", {}) self._set_model_specific_special_tokens(special_tokens=self.extra_special_tokens) @property def max_len_single_sentence(self) -> int: """ `int`: The maximum length of a sentence that can be fed to the model. """ return self.model_max_length - self.num_special_tokens_to_add(pair=False) @property def max_len_sentences_pair(self) -> int: """ `int`: The maximum combined length of a pair of sentences that can be fed to the model. """ return self.model_max_length - self.num_special_tokens_to_add(pair=True) @max_len_single_sentence.setter def max_len_single_sentence(self, value) -> int: # For backward compatibility, allow to try to setup 'max_len_single_sentence'. if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose: if not self.deprecation_warnings.get("max_len_single_sentence", False): logger.warning( "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up." ) self.deprecation_warnings["max_len_single_sentence"] = True else: raise ValueError( "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up." ) @max_len_sentences_pair.setter def max_len_sentences_pair(self, value) -> int: # For backward compatibility, allow to try to setup 'max_len_sentences_pair'. if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose: if not self.deprecation_warnings.get("max_len_sentences_pair", False): logger.warning( "Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up." ) self.deprecation_warnings["max_len_sentences_pair"] = True else: raise ValueError("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.") def _set_processor_class(self, processor_class: str): """Sets processor class as an attribute.""" self._processor_class = processor_class @property def added_tokens_decoder(self) -> Dict[int, AddedToken]: raise NotImplementedError() def __repr__(self) -> str: added_tokens_decoder_rep = "\n\t".join([f"{k}: {v.__repr__()}," for k, v in self.added_tokens_decoder.items()]) return ( f"{self.__class__.__name__}(name_or_path='{self.name_or_path}'," f" vocab_size={self.vocab_size}, model_max_length={self.model_max_length}, is_fast={self.is_fast}," f" padding_side='{self.padding_side}', truncation_side='{self.truncation_side}'," f" special_tokens={self.special_tokens_map}, clean_up_tokenization_spaces={self.clean_up_tokenization_spaces}," " added_tokens_decoder={\n\t" + added_tokens_decoder_rep + "\n}\n)" ) def __len__(self) -> int: raise NotImplementedError() def get_vocab(self) -> Dict[str, int]: """ Returns the vocabulary as a dictionary of token to index. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the vocab. Returns: `Dict[str, int]`: The vocabulary. 
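        Example (illustrative; the token strings and ids depend on the concrete tokenizer):

        ```python
        vocab = tokenizer.get_vocab()
        token = next(iter(vocab))
        assert vocab[token] == tokenizer.convert_tokens_to_ids(token)
        ```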
""" raise NotImplementedError() def apply_chat_template( self, conversation: Union[List[Dict[str, str]], List[List[Dict[str, str]]]], tools: Optional[List[Union[Dict, Callable]]] = None, documents: Optional[List[Dict[str, str]]] = None, chat_template: Optional[str] = None, add_generation_prompt: bool = False, continue_final_message: bool = False, tokenize: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: bool = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_dict: bool = False, return_assistant_tokens_mask: bool = False, tokenizer_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]: """ Converts a list of dictionaries with `"role"` and `"content"` keys to a list of token ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to determine the format and control tokens to use when converting. Args: conversation (Union[List[Dict[str, str]], List[List[Dict[str, str]]]]): A list of dicts with "role" and "content" keys, representing the chat history so far. tools (`List[Dict]`, *optional*): A list of tools (callable functions) that will be accessible to the model. If the template does not support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema, giving the name, description and argument types for the tool. See our [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use) for more information. documents (`List[Dict[str, str]]`, *optional*): A list of dicts representing documents that will be accessible to the model if it is performing RAG (retrieval-augmented generation). If the template does not support RAG, this argument will have no effect. We recommend that each document should be a dict containing "title" and "text" keys. Please see the RAG section of the [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#arguments-for-RAG) for examples of passing documents with chat templates. chat_template (`str`, *optional*): A Jinja template to use for this conversion. It is usually not necessary to pass anything to this argument, as the model's template will be used by default. add_generation_prompt (bool, *optional*): If this is set, a prompt with the token(s) that indicate the start of an assistant message will be appended to the formatted output. This is useful when you want to generate a response from the model. Note that this argument will be passed to the chat template, and so it must be supported in the template for this argument to have any effect. continue_final_message (bool, *optional*): If this is set, the chat will be formatted so that the final message in the chat is open-ended, without any EOS tokens. The model will continue this message rather than starting a new one. This allows you to "prefill" part of the model's response for it. Cannot be used at the same time as `add_generation_prompt`. tokenize (`bool`, defaults to `True`): Whether to tokenize the output. If `False`, the output will be a string. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). 
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, defaults to `False`): Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`. max_length (`int`, *optional*): Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If not specified, the tokenizer's `max_length` attribute will be used as a default. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable values are: - `'tf'`: Return TensorFlow `tf.Tensor` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. return_dict (`bool`, defaults to `False`): Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`. tokenizer_kwargs (`Dict[str: Any]`, *optional*): Additional kwargs to pass to the tokenizer. return_assistant_tokens_mask (`bool`, defaults to `False`): Whether to return a mask of the assistant generated tokens. For tokens generated by the assistant, the mask will contain 1. For user and system tokens, the mask will contain 0. This functionality is only available for chat templates that support it via the `{% generation %}` keyword. **kwargs: Additional kwargs to pass to the template renderer. Will be accessible by the chat template. Returns: `Union[List[int], Dict]`: A list of token ids representing the tokenized chat so far, including control tokens. This output is ready to pass to the model, either directly or via methods like `generate()`. If `return_dict` is set, will return a dict of tokenizer outputs instead. """ if return_dict and not tokenize: raise ValueError( "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict " "of tokenizer outputs to return." ) if return_assistant_tokens_mask and not return_dict: raise ValueError("`return_assistant_tokens_mask=True` is incompatible with `return_dict=False`") if tokenizer_kwargs is None: tokenizer_kwargs = {} chat_template = self.get_chat_template(chat_template, tools) if return_assistant_tokens_mask and not re.search(r"\{\%-?\s*generation\s*-?\%\}", chat_template): logger.warning_once( "return_assistant_tokens_mask==True but chat template does not contain `{% generation %}` keyword." ) # Compilation function uses a cache to avoid recompiling the same template compiled_template = _compile_jinja_template(chat_template) if isinstance(conversation, (list, tuple)) and ( isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "messages") ): conversations = conversation is_batched = True else: conversations = [conversation] is_batched = False if continue_final_message: if add_generation_prompt: raise ValueError( "continue_final_message and add_generation_prompt are not compatible. Use continue_final_message when you want the model to continue the final message, and add_generation_prompt when you want to add a header that will prompt it to start a new assistant message instead." ) if return_assistant_tokens_mask: raise ValueError("continue_final_message is not compatible with return_assistant_tokens_mask.") # We accept either JSON schemas or functions for tools. 
If we get functions, we convert them to schemas if tools is not None: tool_schemas = [] for tool in tools: if isinstance(tool, dict): tool_schemas.append(tool) elif isfunction(tool): tool_schemas.append(get_json_schema(tool)) else: raise ValueError( "Tools should either be a JSON schema, or a callable function with type hints " "and a docstring suitable for auto-conversion to a schema." ) else: tool_schemas = None if documents is not None: for document in documents: if not isinstance(document, dict): raise TypeError("Documents should be a list of dicts with 'title' and 'text' keys!") rendered = [] all_generation_indices = [] template_kwargs = {**self.special_tokens_map, **kwargs} # kwargs overwrite special tokens if both are present for chat in conversations: if hasattr(chat, "messages"): # Indicates it's a Conversation object chat = chat.messages if return_assistant_tokens_mask: rendered_chat, generation_indices = _render_with_assistant_indices( compiled_template=compiled_template, messages=chat, tools=tool_schemas, documents=documents, add_generation_prompt=add_generation_prompt, **template_kwargs, ) all_generation_indices.append(generation_indices) else: rendered_chat = compiled_template.render( messages=chat, tools=tool_schemas, documents=documents, add_generation_prompt=add_generation_prompt, **template_kwargs, ) if continue_final_message: final_message = chat[-1]["content"] if isinstance(final_message, (list, tuple)): final_message = final_message[-1]["text"] try: rendered_chat = rendered_chat[: rendered_chat.rindex(final_message) + len(final_message)] except: # noqa: E722 # Some chat templates like Llama-3.1 trim messages before rendering, so we must do the same here. final_message = final_message.strip() rendered_chat = rendered_chat[: rendered_chat.rindex(final_message) + len(final_message)] rendered.append(rendered_chat) if not is_batched: rendered = rendered[0] if tokenize: out = self( rendered, padding=padding, truncation=truncation, max_length=max_length, add_special_tokens=False, return_tensors=return_tensors, **tokenizer_kwargs, ) if return_dict: if return_assistant_tokens_mask: assistant_masks = [] if is_batched or return_tensors: input_ids = out["input_ids"] else: input_ids = [out["input_ids"]] for i in range(len(input_ids)): current_mask = [0] * len(input_ids[i]) for assistant_start_char, assistant_end_char in all_generation_indices[i]: start_token = out.char_to_token(i, assistant_start_char) end_token = out.char_to_token(i, assistant_end_char - 1) if start_token is None: # start_token is out of bounds maybe due to truncation. break for token_id in range(start_token, end_token + 1 if end_token else len(input_ids[i])): current_mask[token_id] = 1 assistant_masks.append(current_mask) out["assistant_masks"] = assistant_masks if is_batched else assistant_masks[0] return out else: return out["input_ids"] else: return rendered def get_chat_template(self, chat_template: Optional[str] = None, tools: Optional[List[Dict]] = None) -> str: """ Retrieve the chat template string used for tokenizing chat messages. This template is used internally by the `apply_chat_template` method and can also be used externally to retrieve the model's chat template for better generation tracking. Args: chat_template (`str`, *optional*): A Jinja template or the name of a template to use for this conversion. It is usually not necessary to pass anything to this argument, as the model's template will be used by default. 
tools (`List[Dict]`, *optional*): A list of tools (callable functions) that will be accessible to the model. If the template does not support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema, giving the name, description and argument types for the tool. See our [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use) for more information. Returns: `str`: The chat template string. """ # First, handle the cases when the model has a dict of multiple templates if isinstance(self.chat_template, dict): template_dict = self.chat_template if chat_template is not None and chat_template in template_dict: # The user can pass the name of a template to the chat template argument instead of an entire template chat_template = template_dict[chat_template] elif chat_template is None: if tools is not None and "tool_use" in template_dict: chat_template = template_dict["tool_use"] elif "default" in template_dict: chat_template = template_dict["default"] else: raise ValueError( "This model has multiple chat templates with no default specified! Please either pass a chat " "template or the name of the template you wish to use to the `chat_template` argument. Available " f"template names are {sorted(template_dict.keys())}." ) elif chat_template is None: # These are the cases when the model has a single template # priority: `chat_template` argument > `tokenizer.chat_template` if self.chat_template is not None: chat_template = self.chat_template else: raise ValueError( "Cannot use chat template functions because tokenizer.chat_template is not set and no template " "argument was passed! For information about writing templates and setting the " "tokenizer.chat_template attribute, please see the documentation at " "https://huggingface.co/docs/transformers/main/en/chat_templating" ) return chat_template @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", trust_remote_code=False, **kwargs, ): r""" Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined tokenizer. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co. - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g., `./my_model_directory/`. - (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g., `./my_model_directory/vocab.txt`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download the vocabulary files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. 
proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). local_files_only (`bool`, *optional*, defaults to `False`): Whether or not to only rely on local files and not to attempt to download any files. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. subfolder (`str`, *optional*): In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for facebook/rag-token-base), specify it here. inputs (additional positional arguments, *optional*): Will be passed along to the Tokenizer `__init__` method. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. kwargs (additional keyword arguments, *optional*): Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`. See parameters in the `__init__` for more details. <Tip> Passing `token=True` is required when you want to use a private model. </Tip> Examples: ```python # We can't instantiate directly the base class *PreTrainedTokenizerBase* so let's show our examples on a derived class: BertTokenizer # Download vocabulary from huggingface.co and cache. tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") # Download vocabulary from huggingface.co (user-uploaded) and cache. tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased") # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*) tokenizer = BertTokenizer.from_pretrained("./test/saved_model/") # If the tokenizer uses a single vocabulary file, you can point directly to this file tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt") # You can link tokens to special vocabulary when instantiating tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", unk_token="<unk>") # You should be sure '<unk>' is in the vocabulary when doing that. # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead) assert tokenizer.unk_token == "<unk>" ```""" resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) use_auth_token = kwargs.pop("use_auth_token", None) subfolder = kwargs.pop("subfolder", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) commit_hash = kwargs.pop("_commit_hash", None) gguf_file = kwargs.get("gguf_file", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. 
Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token user_agent = {"file_type": "tokenizer", "from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True pretrained_model_name_or_path = str(pretrained_model_name_or_path) vocab_files = {} init_configuration = {} is_local = os.path.isdir(pretrained_model_name_or_path) single_file_id = None if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): if len(cls.vocab_files_names) > 1 and not gguf_file: raise ValueError( f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not " "supported for this tokenizer. Use a model identifier or the path to a directory instead." ) warnings.warn( f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and " "won't be possible anymore in v5. Use a model identifier or the path to a directory instead.", FutureWarning, ) file_id = list(cls.vocab_files_names.keys())[0] vocab_files[file_id] = pretrained_model_name_or_path single_file_id = file_id else: if gguf_file: vocab_files["vocab_file"] = gguf_file else: # At this point pretrained_model_name_or_path is either a directory or a model identifier name additional_files_names = { "added_tokens_file": ADDED_TOKENS_FILE, # kept only for legacy "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE, # kept only for legacy "tokenizer_config_file": TOKENIZER_CONFIG_FILE, # tokenizer_file used to initialize a slow from a fast. Properly copy the `addedTokens` instead of adding in random orders "tokenizer_file": FULL_TOKENIZER_FILE, "chat_template_file": CHAT_TEMPLATE_FILE, } vocab_files = {**cls.vocab_files_names, **additional_files_names} if "tokenizer_file" in vocab_files: # Try to get the tokenizer config to see if there are versioned tokenizer files. 
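                    # If the config lists "fast_tokenizer_files", `get_fast_tokenizer_file` below picks the entry
                    # best suited to the installed `tokenizers` version (an assumption about its selection logic);
                    # otherwise we fall back to the default `tokenizer.json` (FULL_TOKENIZER_FILE).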
fast_tokenizer_file = FULL_TOKENIZER_FILE resolved_config_file = cached_file( pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, subfolder=subfolder, user_agent=user_agent, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, _commit_hash=commit_hash, ) commit_hash = extract_commit_hash(resolved_config_file, commit_hash) if resolved_config_file is not None: with open(resolved_config_file, encoding="utf-8") as reader: tokenizer_config = json.load(reader) if "fast_tokenizer_files" in tokenizer_config: fast_tokenizer_file = get_fast_tokenizer_file(tokenizer_config["fast_tokenizer_files"]) vocab_files["tokenizer_file"] = fast_tokenizer_file # Get files from url, cache, or disk depending on the case resolved_vocab_files = {} unresolved_files = [] for file_id, file_path in vocab_files.items(): if file_path is None: resolved_vocab_files[file_id] = None elif single_file_id == file_id: if os.path.isfile(file_path): resolved_vocab_files[file_id] = file_path elif is_remote_url(file_path): resolved_vocab_files[file_id] = download_url(file_path, proxies=proxies) else: resolved_vocab_files[file_id] = cached_file( pretrained_model_name_or_path, file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, _commit_hash=commit_hash, ) commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash) if len(unresolved_files) > 0: logger.info( f"Can't load following files from cache: {unresolved_files} and cannot check if these " "files are necessary for the tokenizer to operate." ) # If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be # loaded directly from the GGUF file. if all(full_file_name is None for full_file_name in resolved_vocab_files.values()) and not gguf_file: raise EnvironmentError( f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from " "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing all relevant files for a {cls.__name__} tokenizer." 
) for file_id, file_path in vocab_files.items(): if file_id not in resolved_vocab_files: continue if is_local: logger.info(f"loading file {file_path}") else: logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}") return cls._from_pretrained( resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, token=token, cache_dir=cache_dir, local_files_only=local_files_only, _commit_hash=commit_hash, _is_local=is_local, trust_remote_code=trust_remote_code, **kwargs, ) @classmethod def _from_pretrained( cls, resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, token=None, cache_dir=None, local_files_only=False, _commit_hash=None, _is_local=False, trust_remote_code=False, **kwargs, ): # We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json # file or if `from_slow` is set to True. from_slow = kwargs.get("from_slow", False) gguf_file = kwargs.get("gguf_file", None) has_tokenizer_file = resolved_vocab_files.get("tokenizer_file", None) is not None # If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be # loaded directly from the GGUF file. if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None and not gguf_file: slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained( copy.deepcopy(resolved_vocab_files), pretrained_model_name_or_path, copy.deepcopy(init_configuration), *init_inputs, token=token, cache_dir=cache_dir, local_files_only=local_files_only, _commit_hash=_commit_hash, **(copy.deepcopy(kwargs)), ) else: slow_tokenizer = None # Prepare tokenizer initialization kwargs # Did we saved some inputs and kwargs to reload ? tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None) if tokenizer_config_file is not None: with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle: init_kwargs = json.load(tokenizer_config_handle) # First attempt. We get tokenizer_class from tokenizer_config to check mismatch between tokenizers. config_tokenizer_class = init_kwargs.get("tokenizer_class") init_kwargs.pop("tokenizer_class", None) if not has_tokenizer_file: init_kwargs.pop("tokenizer_file", None) saved_init_inputs = init_kwargs.pop("init_inputs", ()) if not init_inputs: init_inputs = saved_init_inputs else: config_tokenizer_class = None init_kwargs = init_configuration # If an independent chat template file exists, it takes priority over template entries in the tokenizer config chat_template_file = resolved_vocab_files.pop("chat_template_file", None) if chat_template_file is not None: with open(chat_template_file) as chat_template_handle: init_kwargs["chat_template"] = chat_template_handle.read() # Clobbers any template in the config if not _is_local: if "auto_map" in init_kwargs: # For backward compatibility with odl format. if isinstance(init_kwargs["auto_map"], (tuple, list)): init_kwargs["auto_map"] = {"AutoTokenizer": init_kwargs["auto_map"]} init_kwargs["auto_map"] = add_model_info_to_auto_map( init_kwargs["auto_map"], pretrained_model_name_or_path ) if "custom_pipelines" in init_kwargs: init_kwargs["custom_pipelines"] = add_model_info_to_custom_pipelines( init_kwargs["custom_pipelines"], pretrained_model_name_or_path ) if config_tokenizer_class is None: # Matt: This entire block is only used to decide if the tokenizer class matches the class in the repo. # If not, it raises a warning, but otherwise continues. 
            # Since we mostly load tokenizers with AutoTokenizer these days, it seems like a lot of work (and a
            # source of bugs) for little gain. Maybe we can just remove this entirely?
            from .models.auto.configuration_auto import AutoConfig  # tests_ignore

            # Second attempt. If we have not yet found tokenizer_class, let's try to use the config.
            try:
                config = AutoConfig.from_pretrained(
                    pretrained_model_name_or_path,
                    token=token,
                    cache_dir=cache_dir,
                    local_files_only=local_files_only,
                    trust_remote_code=trust_remote_code,
                    _commit_hash=_commit_hash,
                )
                config_tokenizer_class = config.tokenizer_class
            except (OSError, ValueError, KeyError):
                # skip if an error occurred.
                config = None
            if config_tokenizer_class is None:
                # Third attempt. If we have not yet found the original type of the tokenizer we are loading, we see
                # if we can infer it from the type of the configuration file.
                from .models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES  # tests_ignore

                if hasattr(config, "model_type"):
                    model_type = config.model_type
                else:
                    # Fallback: use pattern matching on the string.
                    model_type = None
                    for pattern in TOKENIZER_MAPPING_NAMES.keys():
                        if pattern in str(pretrained_model_name_or_path):
                            model_type = pattern
                            break

                if model_type is not None:
                    config_tokenizer_class, config_tokenizer_class_fast = TOKENIZER_MAPPING_NAMES.get(
                        model_type, (None, None)
                    )
                    if config_tokenizer_class is None:
                        config_tokenizer_class = config_tokenizer_class_fast

            if config_tokenizer_class is not None:
                if cls.__name__.replace("Fast", "") != config_tokenizer_class.replace("Fast", ""):
                    logger.warning(
                        "The tokenizer class you load from this checkpoint is not the same type as the class this"
                        " function is called from. It may result in unexpected tokenization. \nThe tokenizer class you"
                        f" load from this checkpoint is '{config_tokenizer_class}'. \nThe class this function is called"
                        f" from is '{cls.__name__}'."
                    )

        # Update with newly provided kwargs
        init_kwargs.update(kwargs)

        # Merge resolved_vocab_files arguments in init_kwargs.
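        # (Illustrative sketch, not exhaustive:) at this point `resolved_vocab_files` typically maps file ids to
        # local paths, e.g.
        #     {"vocab_file": "/path/to/cache/vocab.txt", "tokenizer_file": "/path/to/cache/tokenizer.json", ...}
        # Entries that are popped below are handled explicitly; everything else is forwarded to the tokenizer
        # constructor through `init_kwargs`.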
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None) special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None) for args_name, file_path in resolved_vocab_files.items(): if args_name not in init_kwargs: init_kwargs[args_name] = file_path tokenizer_file = resolved_vocab_files.pop("tokenizer_file", None) if slow_tokenizer is not None: init_kwargs["__slow_tokenizer"] = slow_tokenizer init_kwargs["name_or_path"] = pretrained_model_name_or_path #### Handle tokenizer serialization of added and special tokens added_tokens_decoder: Dict[int, AddedToken] = {} added_tokens_map: Dict[str, AddedToken] = {} # if we have info on the slow added tokens if "added_tokens_decoder" in init_kwargs: for idx, token in init_kwargs["added_tokens_decoder"].items(): if isinstance(token, dict): token = AddedToken(**token) if isinstance(token, AddedToken): added_tokens_decoder[int(idx)] = token added_tokens_map[str(token)] = token else: raise ValueError( f"Found a {token.__class__} in the saved `added_tokens_decoder`, should be a dictionary or an AddedToken instance" ) else: # begin legacy: read the added_tokens_file and update kwargs with special_tokens_map if modified if special_tokens_map_file is not None: with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle: special_tokens_map = json.load(special_tokens_map_handle) for key, value in special_tokens_map.items(): if key in kwargs and kwargs[key]: # This value has already been redefined by the kwargs # We keep this new value and ignore the one stored in the special_tokens_map_file continue if isinstance(value, dict): value["special"] = True value = AddedToken(**value) elif key == "additional_special_tokens" and isinstance(value, list): additional_special_tokens = init_kwargs.pop("additional_special_tokens", []) or [] for token in value: if isinstance(token, dict): token["special"] = True token = AddedToken(**token) if token not in additional_special_tokens: additional_special_tokens.append(token) value = additional_special_tokens init_kwargs[key] = value # slow -> slow|fast, legacy: convert the `"added_tokens.json"` file to `added_tokens_decoder`. # this is for legacy purpose. We don't add the tokens after init for efficiency. 
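        # (Illustrative, assumed shape:) a legacy `added_tokens.json` is a flat {token_string: index} mapping, e.g.
        #     {"<new_token>": 32000, "[SPECIAL]": 32001}
        # where indices at or above the base vocabulary size extend the vocabulary.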
        if added_tokens_file is not None:
            special_tokens = []
            for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys():
                if init_kwargs[key] is not None:
                    if key == "additional_special_tokens":
                        special_tokens += [str(token) for token in init_kwargs[key]]
                    else:
                        special_tokens.append(str(init_kwargs[key]))

            with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
                added_tok_encoder = json.load(added_tokens_handle)
            for str_token, index in added_tok_encoder.items():
                # if index not in added_tokens_decoder and str_token not in added_tokens_map:
                special = str_token in special_tokens
                added_tokens_decoder[index] = AddedToken(
                    str_token, rstrip=False, lstrip=False, normalized=not special, special=special
                )
                added_tokens_map[str_token] = added_tokens_decoder[index]

        # allows converting a fast -> slow: add the `tokenizer.json`'s `"added_tokens"` to the slow tokenizer
        # if `tokenizer_config.json` is `None`
        if tokenizer_file is not None:
            # This is for slow so can be done before
            with open(tokenizer_file, encoding="utf-8") as tokenizer_file_handle:
                tokenizer_file_handle = json.load(tokenizer_file_handle)
                added_tokens = tokenizer_file_handle.pop("added_tokens")
            for serialized_tokens in added_tokens:
                idx = serialized_tokens.pop("id")
                added_tokens_decoder[idx] = AddedToken(**serialized_tokens)
                added_tokens_map[str(added_tokens_decoder[idx])] = added_tokens_decoder[idx]

        # end legacy

        # Passing AddedTokens and not strings to the class to prevent it from casting the string to a different AddedToken
        # convert {'__type': 'AddedToken', 'content': '<ent>', 'lstrip': False, 'normalized': True, ...} to AddedTokens
        init_kwargs["added_tokens_decoder"] = added_tokens_decoder
        init_kwargs = cls.convert_added_tokens(init_kwargs, save=False)
        for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys():
            if added_tokens_map != {} and init_kwargs[key] is not None:
                if key != "additional_special_tokens":
                    init_kwargs[key] = added_tokens_map.get(str(init_kwargs[key]), init_kwargs[key])

        # Instantiate the tokenizer.
        try:
            tokenizer = cls(*init_inputs, **init_kwargs)
        except import_protobuf_decode_error():
            logger.info(
                "Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead. "
                "(Google protobuf error: Tried to load SPM model with non-SPM vocab file).",
            )
            return False
        except RuntimeError as e:
            if "sentencepiece_processor.cc" in str(e):
                logger.info(
                    "Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead. "
                    "(SentencePiece RuntimeError: Tried to load SPM model with non-SPM vocab file).",
                )
                return False
            # Re-raise unrelated RuntimeErrors instead of swallowing them.
            raise
        except OSError:
            raise OSError(
                "Unable to load vocabulary from file. "
                "Please check that the provided vocabulary is accessible and not corrupted."
            )

        if added_tokens_decoder != {} and max(added_tokens_decoder.keys()) > tokenizer.vocab_size:
            logger.info(
                "Special tokens have been added in the vocabulary, make sure the associated word embeddings are"
                " fine-tuned or trained."
            )
        return tokenizer

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        # This method should be deleted in Transformers v5
        # Its only purpose is to potentially throw a warning
        # that incorrectly defined max lengths of T5's tokenizer are used
        # which we will correct in Transformers v5.
        return max_model_length

    @classmethod
    def convert_added_tokens(cls, obj: Union[AddedToken, Any], save=False, add_type_field=True):
        if isinstance(obj, dict) and "__type" in obj and obj["__type"] == "AddedToken":
            obj.pop("__type")
            return AddedToken(**obj)
        if isinstance(obj, AddedToken) and save:
            obj = obj.__getstate__()
            if add_type_field:
                obj["__type"] = "AddedToken"
            else:
                # Don't save "special" for previous tokenizers
                obj.pop("special")
            return obj
        elif isinstance(obj, (list, tuple)):
            return [cls.convert_added_tokens(o, save=save, add_type_field=add_type_field) for o in obj]
        elif isinstance(obj, dict):
            return {k: cls.convert_added_tokens(v, save=save, add_type_field=add_type_field) for k, v in obj.items()}
        return obj

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        legacy_format: Optional[bool] = None,
        filename_prefix: Optional[str] = None,
        push_to_hub: bool = False,
        **kwargs,
    ) -> Tuple[str]:
        """
        Save the full tokenizer state.

        This method makes sure the full tokenizer state can then be re-loaded using the
        [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method.

        Warning: This won't save modifications you may have applied to the tokenizer after the instantiation (for
        instance, modifying `tokenizer.do_lower_case` after creation).

        Args:
            save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved.
            legacy_format (`bool`, *optional*):
                Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON
                format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a
                separate added_tokens file.

                If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with
                "slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be able to be
                loaded in the corresponding "slow" tokenizer.

                If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exist, a
                `ValueError` is raised.
            filename_prefix (`str`, *optional*):
                A prefix to add to the names of the files saved by the tokenizer.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.

        Returns:
            A tuple of `str`: The files saved.
        """
        use_auth_token = kwargs.pop("use_auth_token", None)

        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if kwargs.get("token", None) is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            kwargs["token"] = use_auth_token

        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
            repo_id = self._create_repo(repo_id, **kwargs)
            files_timestamps = self._get_files_timestamps(save_directory)

        special_tokens_map_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + SPECIAL_TOKENS_MAP_FILE
        )
        tokenizer_config_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE
        )
        chat_template_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + CHAT_TEMPLATE_FILE
        )

        tokenizer_config = copy.deepcopy(self.init_kwargs)

        # Let's save the init kwargs
        target_keys = set(self.init_kwargs.keys())
        # Let's save the special tokens map (only the strings)
        target_keys.update(["model_max_length", "clean_up_tokenization_spaces"])

        for k in target_keys:
            if hasattr(self, k):
                tokenizer_config[k] = getattr(self, k)

        # Let's make sure we properly save the special tokens
        tokenizer_config.update(self.special_tokens_map)
        if "extra_special_tokens" not in tokenizer_config:
            tokenizer_config["extra_special_tokens"] = self.extra_special_tokens
        tokenizer_config.update(self.extra_special_tokens)

        saved_raw_chat_template = False
        if self.chat_template is not None:
            if isinstance(self.chat_template, dict):
                # Chat template dicts are saved to the config as lists of dicts with fixed key names.
                # They will be reconstructed as a single dict during loading.
                # We're trying to discourage chat template dicts, and they are always
                # saved in the config, never as single files.
                tokenizer_config["chat_template"] = [{"name": k, "template": v} for k, v in self.chat_template.items()]
            elif kwargs.get("save_raw_chat_template", False):
                with open(chat_template_file, "w", encoding="utf-8") as f:
                    f.write(self.chat_template)
                saved_raw_chat_template = True
                logger.info(f"chat template saved in {chat_template_file}")
                if "chat_template" in tokenizer_config:
                    tokenizer_config.pop("chat_template")  # To ensure it doesn't somehow end up in the config too
            else:
                tokenizer_config["chat_template"] = self.chat_template

        if len(self.init_inputs) > 0:
            tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
        for file_id in self.vocab_files_names.keys():
            tokenizer_config.pop(file_id, None)

        # No type fields, this way old fast and slow tokenizers can load it
        tokenizer_config = self.convert_added_tokens(tokenizer_config, add_type_field=True, save=True)

        # Process added tokens separately: allows previous versions to ignore it!
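        # (Illustrative, assumed shape:) once dumped to JSON below, each entry in `added_tokens_decoder` serializes
        # the AddedToken state, e.g.
        #     {"32000": {"content": "<new_token>", "lstrip": false, "rstrip": false, "normalized": false,
        #                "single_word": false, "special": true}}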
        added_tokens = {}
        for key, value in self.added_tokens_decoder.items():
            added_tokens[key] = value.__getstate__()
        tokenizer_config["added_tokens_decoder"] = added_tokens

        # Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained
        tokenizer_class = self.__class__.__name__
        # Remove the Fast at the end unless we have a special `PreTrainedTokenizerFast`
        if tokenizer_class.endswith("Fast") and tokenizer_class != "PreTrainedTokenizerFast":
            tokenizer_class = tokenizer_class[:-4]
        tokenizer_config["tokenizer_class"] = tokenizer_class
        if getattr(self, "_auto_map", None) is not None:
            tokenizer_config["auto_map"] = self._auto_map
        if getattr(self, "_processor_class", None) is not None:
            tokenizer_config["processor_class"] = self._processor_class

        # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=tokenizer_config)

        # remove private information
        if "name_or_path" in tokenizer_config:
            tokenizer_config.pop("name_or_path")
        tokenizer_config.pop("special_tokens_map_file", None)
        tokenizer_config.pop("tokenizer_file", None)
        if "device_map" in tokenizer_config:
            tokenizer_config.pop("device_map")

        with open(tokenizer_config_file, "w", encoding="utf-8") as f:
            out_str = json.dumps(tokenizer_config, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
            f.write(out_str)
        logger.info(f"tokenizer config file saved in {tokenizer_config_file}")

        # Sanitize AddedTokens in special_tokens_map

        # Kept for forward compatibility, will be removed in Transformers v5. Type fields are not saved for forward
        # compatibility; "special" should not be saved either.
        write_dict = self.convert_added_tokens(self.special_tokens_map_extended, save=True, add_type_field=False)
        with open(special_tokens_map_file, "w", encoding="utf-8") as f:
            out_str = json.dumps(write_dict, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
            f.write(out_str)
        logger.info(f"Special tokens file saved in {special_tokens_map_file}")

        file_names = (tokenizer_config_file, special_tokens_map_file)
        if saved_raw_chat_template:
            file_names += (chat_template_file,)

        save_files = self._save_pretrained(
            save_directory=save_directory,
            file_names=file_names,
            legacy_format=legacy_format,
            filename_prefix=filename_prefix,
        )

        if push_to_hub:
            self._upload_modified_files(
                save_directory,
                repo_id,
                files_timestamps,
                commit_message=commit_message,
                token=kwargs.get("token"),
            )

        return save_files

    def _save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        file_names: Tuple[str],
        legacy_format: Optional[bool] = None,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        """
        Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens.

        Fast tokenizers can also be saved in a single JSON file containing {config + vocab + added-tokens} using the
        specific [`~tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained`]
        """
        if legacy_format is False:
            raise ValueError(
                "Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non-legacy format."
            )

        save_directory = str(save_directory)

        added_tokens_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE
        )
        # the new get_added_vocab() also returns special tokens and tokens that have an index < vocab_size
        added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size}
        if added_vocab:
            with open(added_tokens_file, "w", encoding="utf-8") as f:
                out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
                f.write(out_str)
                logger.info(f"added tokens file saved in {added_tokens_file}")

        vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)

        return file_names + vocab_files + (added_tokens_file,)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save only the vocabulary of the tokenizer (vocabulary + added tokens).

        This method won't save the configuration and special token mappings of the tokenizer. Use
        [`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                An optional prefix to add to the names of the saved files.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        raise NotImplementedError

    def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
        """
        Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`.

        Args:
            text (`str`):
                The sequence to be encoded.
            pair (`str`, *optional*):
                A second sequence to be encoded with the first.
            add_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to add the special tokens associated with the corresponding model.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific encode method. See details in
                [`~PreTrainedTokenizerBase.__call__`]

        Returns:
            `List[str]`: The list of tokens.
        """
        raise NotImplementedError

    @add_end_docstrings(
        ENCODE_KWARGS_DOCSTRING,
        """
            **kwargs: Passed along to the `.tokenize()` method.
        """,
        """
        Returns:
            `List[int]`, `torch.Tensor`, `tf.Tensor` or `np.ndarray`: The tokenized ids of the text.
        """,
    )
    def encode(
        self,
        text: Union[TextInput, PreTokenizedInput, EncodedInput],
        text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> List[int]:
        """
        Converts a string to a sequence of ids (integers), using the tokenizer and vocabulary.

        Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`.

        Args:
            text (`str`, `List[str]` or `List[int]`):
                The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
                `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method).
            text_pair (`str`, `List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string
                using the `tokenize` method) or a list of integers (tokenized string ids using the
                `convert_tokens_to_ids` method).
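
        Example (a minimal usage sketch; the checkpoint name and the exact ids shown are illustrative):

        ```python
        >>> from transformers import AutoTokenizer

        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        >>> tokenizer.encode("Hello world")  # special tokens like [CLS]/[SEP] are added by default
        [101, 7592, 2088, 102]
        ```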
""" encoded_inputs = self.encode_plus( text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, padding_side=padding_side, return_tensors=return_tensors, **kwargs, ) return encoded_inputs["input_ids"] def num_special_tokens_to_add(self, pair: bool = False) -> int: raise NotImplementedError def _get_padding_truncation_strategies( self, padding=False, truncation=None, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs ): """ Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy and pad_to_max_length) and behaviors. """ old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate") old_pad_to_max_length = kwargs.pop("pad_to_max_length", False) # Backward compatibility for previous behavior, maybe we should deprecate it: # If you only set max_length, it activates truncation for max_length if max_length is not None and padding is False and truncation is None: if verbose: if not self.deprecation_warnings.get("Truncation-not-explicitly-activated", False): logger.warning( "Truncation was not explicitly activated but `max_length` is provided a specific value, please" " use `truncation=True` to explicitly truncate examples to max length. Defaulting to" " 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the" " tokenizer you can select this strategy more precisely by providing a specific strategy to" " `truncation`." ) self.deprecation_warnings["Truncation-not-explicitly-activated"] = True truncation = "longest_first" # Get padding strategy if padding is False and old_pad_to_max_length: if verbose: warnings.warn( "The `pad_to_max_length` argument is deprecated and will be removed in a future version, " "use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or " "use `padding='max_length'` to pad to a max length. In this case, you can give a specific " "length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the " "maximal input size of the model (e.g. 512 for Bert).", FutureWarning, ) if max_length is None: padding_strategy = PaddingStrategy.LONGEST else: padding_strategy = PaddingStrategy.MAX_LENGTH elif padding is not False: if padding is True: if verbose: if max_length is not None and ( truncation is None or truncation is False or truncation == "do_not_truncate" ): warnings.warn( "`max_length` is ignored when `padding`=`True` and there is no truncation strategy. " "To pad to max length, use `padding='max_length'`." ) if old_pad_to_max_length is not False: warnings.warn("Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.") padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(padding, PaddingStrategy): padding_strategy = PaddingStrategy(padding) elif isinstance(padding, PaddingStrategy): padding_strategy = padding else: padding_strategy = PaddingStrategy.DO_NOT_PAD # Get truncation strategy if truncation is None and old_truncation_strategy != "do_not_truncate": if verbose: warnings.warn( "The `truncation_strategy` argument is deprecated and will be removed in a future version, use" " `truncation=True` to truncate examples to a max length. You can give a specific length with" " `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the maximal input" " size of the model (e.g. 512 for Bert). 
If you have pairs of inputs, you can give a specific" " truncation strategy selected among `truncation='only_first'` (will only truncate the first" " sentence in the pairs) `truncation='only_second'` (will only truncate the second sentence in the" " pairs) or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence" " in the pairs).", FutureWarning, ) truncation_strategy = TruncationStrategy(old_truncation_strategy) elif truncation is not False and truncation is not None: if truncation is True: truncation_strategy = ( TruncationStrategy.LONGEST_FIRST ) # Default to truncate the longest sequences in pairs of inputs elif not isinstance(truncation, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation) elif isinstance(truncation, TruncationStrategy): truncation_strategy = truncation else: truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: if self.model_max_length > LARGE_INTEGER: if verbose: if not self.deprecation_warnings.get("Asking-to-pad-to-max_length", False): logger.warning( "Asking to pad to max_length but no maximum length is provided and the model has no" " predefined maximum length. Default to no padding." ) self.deprecation_warnings["Asking-to-pad-to-max_length"] = True padding_strategy = PaddingStrategy.DO_NOT_PAD else: max_length = self.model_max_length if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE: if self.model_max_length > LARGE_INTEGER: if verbose: if not self.deprecation_warnings.get("Asking-to-truncate-to-max_length", False): logger.warning( "Asking to truncate to max_length but no maximum length is provided and the model has" " no predefined maximum length. Default to no truncation." ) self.deprecation_warnings["Asking-to-truncate-to-max_length"] = True truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE else: max_length = self.model_max_length # Test if we have a padding token if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.pad_token is None or self.pad_token_id < 0): raise ValueError( "Asking to pad but the tokenizer does not have a padding token. " "Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` " "or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`." ) # Check that we will truncate to a multiple of pad_to_multiple_of if both are provided if ( truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and padding_strategy != PaddingStrategy.DO_NOT_PAD and pad_to_multiple_of is not None and max_length is not None and (max_length % pad_to_multiple_of != 0) ): raise ValueError( "Truncation and padding are both activated but " f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})." 
            )

        return padding_strategy, truncation_strategy, max_length, kwargs

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair_target: Optional[
            Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]
        ] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`, *optional*):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            text_pair (`str`, `List[str]`, `List[List[str]]`, *optional*):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            text_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
                The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
                list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
                you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            text_pair_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
                The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
                list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
                you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
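
        Example (a minimal usage sketch; the checkpoint name is illustrative):

        ```python
        >>> from transformers import AutoTokenizer

        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        >>> batch = tokenizer(["A first sentence.", "And a second, longer one."], padding=True, return_tensors="pt")
        >>> sorted(batch.keys())
        ['attention_mask', 'input_ids', 'token_type_ids']
        ```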
""" # To avoid duplicating all_kwargs = { "add_special_tokens": add_special_tokens, "padding": padding, "truncation": truncation, "max_length": max_length, "stride": stride, "is_split_into_words": is_split_into_words, "pad_to_multiple_of": pad_to_multiple_of, "padding_side": padding_side, "return_tensors": return_tensors, "return_token_type_ids": return_token_type_ids, "return_attention_mask": return_attention_mask, "return_overflowing_tokens": return_overflowing_tokens, "return_special_tokens_mask": return_special_tokens_mask, "return_offsets_mapping": return_offsets_mapping, "return_length": return_length, "split_special_tokens": kwargs.pop("split_special_tokens", self.split_special_tokens), "verbose": verbose, } all_kwargs.update(kwargs) if text is None and text_target is None: raise ValueError("You need to specify either `text` or `text_target`.") if text is not None: # The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the # input mode in this case. if not self._in_target_context_manager: self._switch_to_input_mode() encodings = self._call_one(text=text, text_pair=text_pair, **all_kwargs) if text_target is not None: self._switch_to_target_mode() target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **all_kwargs) # Leave back tokenizer in input mode self._switch_to_input_mode() if text_target is None: return encodings elif text is None: return target_encodings else: encodings["labels"] = target_encodings["input_ids"] return encodings def _call_one( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, **kwargs, ) -> BatchEncoding: # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if not _is_valid_text_input(text): raise ValueError( "text input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) " "or `List[List[str]]` (batch of pretokenized examples)." ) if text_pair is not None and not _is_valid_text_input(text_pair): raise ValueError( "text input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) " "or `List[List[str]]` (batch of pretokenized examples)." 
            )

        if is_split_into_words:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple))

        if is_batched:
            if isinstance(text_pair, str):
                raise TypeError(
                    "when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as"
                    " `text`."
                )
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
                    f" {len(text_pair)}."
                )
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                is_split_into_words=is_split_into_words,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                split_special_tokens=split_special_tokens,
                **kwargs,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                is_split_into_words=is_split_into_words,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                split_special_tokens=split_special_tokens,
                **kwargs,
            )

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput, EncodedInput],
        text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize and prepare for the model a sequence or a pair of sequences.

        <Tip warning={true}>

        This method is deprecated, `__call__` should be used instead.

        </Tip>

        Args:
            text (`str`, `List[str]` or (for non-fast tokenizers) `List[int]`):
                The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
                `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method).
            text_pair (`str`, `List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded.
                This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of
                integers (tokenized string ids using the `convert_tokens_to_ids` method).
        """

        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._encode_plus(
            text=text,
            text_pair=text_pair,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            is_split_into_words=is_split_into_words,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            split_special_tokens=kwargs.pop("split_special_tokens", self.split_special_tokens),
            **kwargs,
        )

    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput, EncodedInput],
        text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        split_special_tokens: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        raise NotImplementedError

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
            List[PreTokenizedInputPair],
            List[EncodedInput],
            List[EncodedInputPair],
        ],
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        split_special_tokens: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.

        <Tip warning={true}>

        This method is deprecated, `__call__` should be used instead.
        </Tip>

        Args:
            batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`):
                Batch of sequences or pairs of sequences to be encoded. This can be a list of
                strings/string-sequences/int-sequences or a list of pairs of strings/string-sequences/int-sequences
                (see details in `encode_plus`).
        """

        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._batch_encode_plus(
            batch_text_or_text_pairs=batch_text_or_text_pairs,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            is_split_into_words=is_split_into_words,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            split_special_tokens=split_special_tokens,
            **kwargs,
        )

    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
            List[PreTokenizedInputPair],
            List[EncodedInput],
            List[EncodedInputPair],
        ],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        split_special_tokens: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        raise NotImplementedError

    def pad(
        self,
        encoded_inputs: Union[
            BatchEncoding,
            List[BatchEncoding],
            Dict[str, EncodedInput],
            Dict[str, List[EncodedInput]],
            List[Dict[str, EncodedInput]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        Pad a single encoded input or a batch of encoded inputs up to a predefined length or to the max sequence
        length in the batch.

        Padding side (left/right) and padding token ids are defined at the tokenizer level (with `self.padding_side`,
        `self.pad_token_id` and `self.pad_token_type_id`).

        Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode
        the text followed by a call to the `pad` method to get a padded encoding.
        <Tip>

        If the `encoded_inputs` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
        result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
        PyTorch tensors, you will lose the specific device of your tensors however.

        </Tip>

        Args:
            encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]]` or `List[Dict[str, List[int]]]`):
                Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
                tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
                List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
                collate function.

                Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see
                the note above for the return type.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a
                  single sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
                  lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value.

                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side (`str`, *optional*):
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific tokenizer's default, defined by the `return_outputs` attribute.

                [What are attention masks?](../glossary#attention-mask)
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            verbose (`bool`, *optional*, defaults to `True`):
                Whether or not to print more information and warnings.
        """
        if self.__class__.__name__.endswith("Fast"):
            if not self.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False):
                logger.warning_advice(
                    f"You're using a {self.__class__.__name__} tokenizer. Please note that with a fast tokenizer,"
                    " using the `__call__` method is faster than using a method to encode the text followed by a call"
                    " to the `pad` method to get a padded encoding."
) self.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping): encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()} # The model's main input name, usually `input_ids`, has been passed for padding if self.model_input_names[0] not in encoded_inputs: raise ValueError( "You should supply an encoding or a list of encodings to this method " f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}" ) required_input = encoded_inputs[self.model_input_names[0]] if required_input is None or (isinstance(required_input, Sized) and len(required_input) == 0): if return_attention_mask: encoded_inputs["attention_mask"] = [] return encoded_inputs # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch first_element = required_input[0] if isinstance(first_element, (list, tuple)): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. for item in required_input: if len(item) != 0: first_element = item[0] break # At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do. if not isinstance(first_element, (int, list, tuple)): if is_tf_tensor(first_element): return_tensors = "tf" if return_tensors is None else return_tensors elif is_torch_tensor(first_element): return_tensors = "pt" if return_tensors is None else return_tensors elif isinstance(first_element, np.ndarray): return_tensors = "np" if return_tensors is None else return_tensors else: raise ValueError( f"type of {first_element} unknown: {type(first_element)}. " "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in encoded_inputs.items(): encoded_inputs[key] = to_py_obj(value) # Convert padding_strategy in PaddingStrategy padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies( padding=padding, max_length=max_length, verbose=verbose ) required_input = encoded_inputs[self.model_input_names[0]] if required_input and not isinstance(required_input[0], (list, tuple)): encoded_inputs = self._pad( encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) return BatchEncoding(encoded_inputs, tensor_type=return_tensors) batch_size = len(required_input) assert all( len(v) == batch_size for v in encoded_inputs.values() ), "Some items in the output dictionary have a different batch size than others." 
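        # For a batch, resolve LONGEST to a concrete MAX_LENGTH so that every example in the loop below is padded to
        # the same length.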
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = max(len(inputs) for inputs in required_input)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in encoded_inputs.items()}
            outputs = self._pad(
                inputs,
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        return BatchEncoding(batch_outputs, tensor_type=return_tensors)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create the token type IDs corresponding to the sequences passed. [What are token type
        IDs?](../glossary#token-type-ids)

        Should be overridden in a subclass if the model has a special way of building those.

        Args:
            token_ids_0 (`List[int]`): The first tokenized sequence.
            token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.

        Returns:
            `List[int]`: The token type ids.
        """
        if token_ids_1 is None:
            return len(token_ids_0) * [0]
        return [0] * len(token_ids_0) + [1] * len(token_ids_1)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens.

        This implementation does not add special tokens and this method should be overridden in a subclass.

        Args:
            token_ids_0 (`List[int]`): The first tokenized sequence.
            token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.

        Returns:
            `List[int]`: The model input with special tokens.
        """
        if token_ids_1 is None:
            return token_ids_0
        return token_ids_0 + token_ids_1

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def prepare_for_model(
        self,
        ids: List[int],
        pair_ids: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        prepend_batch_axis: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens. Please note: for *pair_ids*
        different from `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return
        overflowing tokens. Such a combination of arguments will raise an error.

        Args:
            ids (`List[int]`):
                Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize`
                and `convert_tokens_to_ids` methods.
            pair_ids (`List[int]`, *optional*):
                Tokenized input ids of the second sequence.
Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." ) if ( return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is not None ): raise ValueError( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} # Compute the total size of the returned encodings total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ids, pair_ids, overflowing_tokens = self.truncate_sequences( ids, pair_ids=pair_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) # Build output dictionary encoded_inputs["input_ids"] = sequence if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def truncate_sequences( self, ids: List[int], pair_ids: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: """ 
        Truncates a sequence pair in-place following the strategy.

        Args:
            ids (`List[int]`):
                Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize`
                and `convert_tokens_to_ids` methods.
            pair_ids (`List[int]`, *optional*):
                Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
                and `convert_tokens_to_ids` methods.
            num_tokens_to_remove (`int`, *optional*, defaults to 0):
                Number of tokens to remove using the truncation strategy.
            truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
                The strategy to follow for truncation. Can be:

                - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will truncate
                  token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
                  batch of pairs) is provided.
                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater than the
                  model maximum admissible input size).
            stride (`int`, *optional*, defaults to 0):
                If set to a positive number, the overflowing tokens returned will contain some tokens from the main
                sequence returned. The value of this argument defines the number of additional tokens.

        Returns:
            `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
            overflowing tokens. Note: the *longest_first* strategy returns an empty list of overflowing tokens if a
            pair of sequences (or a batch of pairs) is provided.
        """
        if num_tokens_to_remove <= 0:
            return ids, pair_ids, []

        if not isinstance(truncation_strategy, TruncationStrategy):
            truncation_strategy = TruncationStrategy(truncation_strategy)

        overflowing_tokens = []
        if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
            truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
        ):
            if len(ids) > num_tokens_to_remove:
                window_len = min(len(ids), stride + num_tokens_to_remove)
                if self.truncation_side == "left":
                    overflowing_tokens = ids[:window_len]
                    ids = ids[num_tokens_to_remove:]
                elif self.truncation_side == "right":
                    overflowing_tokens = ids[-window_len:]
                    ids = ids[:-num_tokens_to_remove]
                else:
                    raise ValueError(f"invalid truncation strategy: {self.truncation_side}, use 'left' or 'right'.")
            else:
                error_msg = (
                    f"We need to remove {num_tokens_to_remove} to truncate the input "
                    f"but the first sequence has a length {len(ids)}. "
                )
                if truncation_strategy == TruncationStrategy.ONLY_FIRST:
                    error_msg = (
                        error_msg + "Please select another truncation strategy than "
                        f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
                    )
                logger.error(error_msg)
        elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
            logger.warning(
                "Be aware, overflowing tokens are not returned for the setting you have chosen,"
            logger.warning(
                "Be aware, overflowing tokens are not returned for the setting you have chosen,"
                f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
                "truncation strategy. So the returned list will always be empty even if some "
                "tokens have been removed."
            )
            len_pair_ids = len(pair_ids) if pair_ids is not None else 0
            len_ids = len(ids)
            first_remove = min(abs(len_pair_ids - len_ids), num_tokens_to_remove)
            second_remove = num_tokens_to_remove - first_remove
            if len_ids > len_pair_ids:
                ids_to_move = first_remove + second_remove // 2
                pair_ids_to_move = second_remove - second_remove // 2
            else:
                ids_to_move = second_remove // 2
                pair_ids_to_move = first_remove + second_remove - (second_remove // 2)

            if self.truncation_side == "right":
                ids = ids[:-ids_to_move] if ids_to_move > 0 else ids
                pair_ids = pair_ids[:-pair_ids_to_move] if pair_ids is not None and pair_ids_to_move > 0 else pair_ids
            elif self.truncation_side == "left":
                ids = ids[ids_to_move:]
                pair_ids = pair_ids[pair_ids_to_move:] if pair_ids is not None else None
            else:
                raise ValueError(f"invalid truncation side: {self.truncation_side}, use 'left' or 'right'.")

        elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
            if len(pair_ids) > num_tokens_to_remove:
                window_len = min(len(pair_ids), stride + num_tokens_to_remove)
                if self.truncation_side == "right":
                    overflowing_tokens = pair_ids[-window_len:]
                    pair_ids = pair_ids[:-num_tokens_to_remove]
                elif self.truncation_side == "left":
                    overflowing_tokens = pair_ids[:window_len]
                    pair_ids = pair_ids[num_tokens_to_remove:]
                else:
                    raise ValueError(f"invalid truncation side: {self.truncation_side}, use 'left' or 'right'.")
            else:
                logger.error(
                    f"We need to remove {num_tokens_to_remove} tokens to truncate the input "
                    f"but the second sequence has a length of {len(pair_ids)}. "
                    f"Please select another truncation strategy than {truncation_strategy}, "
                    "for instance 'longest_first' or 'only_first'."
                )

        return (ids, pair_ids, overflowing_tokens)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in `padding_side` argument:

                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        required_input = encoded_inputs[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)
            padding_side = padding_side if padding_side is not None else self.padding_side

            if padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError(f"Invalid padding side: {padding_side}, use 'left' or 'right'.")

        return encoded_inputs

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Converts a sequence of tokens into a single string. The simplest way to do it is `" ".join(tokens)` but we
        often want to remove sub-word tokenization artifacts at the same time.

        Args:
            tokens (`List[str]`): The tokens to join into a string.

        Returns:
            `str`: The joined tokens.
        """
        raise NotImplementedError

    def batch_decode(
        self,
        sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        **kwargs,
    ) -> List[str]:
        """
        Convert a list of lists of token ids into a list of strings by calling decode.

        Args:
            sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces`.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `List[str]`: The list of decoded sentences.
""" return [ self.decode( seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) for seq in sequences ] def decode( self, token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, **kwargs, ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. Args: token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. If `None`, will default to `self.clean_up_tokenization_spaces`. kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `str`: The decoded sentence. """ # Convert inputs to python lists token_ids = to_py_obj(token_ids) return self._decode( token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) def _decode( self, token_ids: Union[int, List[int]], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, **kwargs, ) -> str: raise NotImplementedError def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`List[int]`): List of ids of the first sequence. token_ids_1 (`List[int]`, *optional*): List of ids of the second sequence. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ assert already_has_special_tokens and token_ids_1 is None, ( "You cannot use ``already_has_special_tokens=False`` with this tokenizer. " "Please use a slow (full python) tokenizer to activate this argument. " "Or set `return_special_tokens_mask=True` when calling the encoding method " "to get the special tokens mask in any tokenizer. " ) all_special_ids = self.all_special_ids # cache the property special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0] return special_tokens_mask @staticmethod def clean_up_tokenization(out_string: str) -> str: """ Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms. Args: out_string (`str`): The text to clean up. Returns: `str`: The cleaned-up string. 
""" out_string = ( out_string.replace(" .", ".") .replace(" ?", "?") .replace(" !", "!") .replace(" ,", ",") .replace(" ' ", "'") .replace(" n't", "n't") .replace(" 'm", "'m") .replace(" 's", "'s") .replace(" 've", "'ve") .replace(" 're", "'re") ) return out_string def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool): """ Depending on the input and internal state we might trigger a warning about a sequence that is too long for its corresponding model Args: ids (`List[str]`): The ids produced by the tokenization max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set) verbose (`bool`): Whether or not to print more information and warnings. """ if max_length is None and len(ids) > self.model_max_length and verbose: if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False): logger.warning( "Token indices sequence length is longer than the specified maximum sequence length " f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model " "will result in indexing errors" ) self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True def _switch_to_input_mode(self): """ Private method to put the tokenizer in input mode (when it has different modes for input/outputs) """ pass def _switch_to_target_mode(self): """ Private method to put the tokenizer in target mode (when it has different modes for input/outputs) """ pass @contextmanager def as_target_tokenizer(self): """ Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels. """ warnings.warn( "`as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your " "labels by using the argument `text_target` of the regular `__call__` method (either in the same call as " "your input texts if you use the same keyword arguments, or in a separate call." ) self._switch_to_target_mode() self._in_target_context_manager = True yield self._in_target_context_manager = False self._switch_to_input_mode() @classmethod def register_for_auto_class(cls, auto_class="AutoTokenizer"): """ Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the library are already mapped with `AutoTokenizer`. <Tip warning={true}> This API is experimental and may have some slight breaking changes in the next releases. </Tip> Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`): The auto class to register this new tokenizer with. """ if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs, ) -> BatchEncoding: """ Prepare model inputs for translation. For best performance, translate one sentence at a time. Arguments: src_texts (`List[str]`): List of documents to summarize or source language texts. tgt_texts (`list`, *optional*): List of summaries or target language texts. 
            max_length (`int`, *optional*):
                Controls the maximum length for encoder inputs (documents to summarize or source language texts). If
                left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
                required by one of the truncation/padding parameters. If the model has no specific maximum input
                length (like XLNet), truncation/padding to a maximum length will be deactivated.
            max_target_length (`int`, *optional*):
                Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or
                set to `None`, this will use the max_length value.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `'longest'`):
                Activates and controls padding. Accepts the following values:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
                  lengths).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `True`):
                Activates and controls truncation. Accepts the following values:

                - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length`
                  or to the maximum acceptable input length for the model if that argument is not provided. This will
                  truncate token by token, removing a token from the longest sequence in the pair if a pair of
                  sequences (or a batch of pairs) is provided.
                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `False` or `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
                  than the model maximum admissible input size).
            **kwargs:
                Additional keyword arguments passed along to `self.__call__`.

        Return:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to the encoder.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
            - **labels** -- List of token ids for tgt_texts.

            The full set of keys `[input_ids, attention_mask, labels]` will only be returned if tgt_texts is passed.
            Otherwise, `input_ids` and `attention_mask` will be the only keys.
        """
        # docstyle-ignore
        formatted_warning = """
`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular
`__call__` method to prepare your inputs and targets.

Here is a short example:

model_inputs = tokenizer(src_texts, text_target=tgt_texts, ...)
If you need to use different keyword arguments for the source and target texts, you should do two calls like this:

model_inputs = tokenizer(src_texts, ...)
labels = tokenizer(text_target=tgt_texts, ...)
model_inputs["labels"] = labels["input_ids"]

See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice.
For a more complete example, see the implementation of `prepare_seq2seq_batch`.
"""
        warnings.warn(formatted_warning, FutureWarning)
        # mBART-specific kwargs that should be ignored by other models.
        kwargs.pop("src_lang", None)
        kwargs.pop("tgt_lang", None)
        if max_length is None:
            max_length = self.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = max_length
        with self.as_target_tokenizer():
            labels = self(
                tgt_texts,
                add_special_tokens=True,
                return_tensors=return_tensors,
                padding=padding,
                max_length=max_target_length,
                truncation=truncation,
                **kwargs,
            )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs


def get_fast_tokenizer_file(tokenization_files: List[str]) -> str:
    """
    Get the tokenization file to use for this version of transformers.

    Args:
        tokenization_files (`List[str]`):
            The list of available tokenizer files.

    Returns:
        `str`: The tokenization file to use.
    """
    tokenizer_files_map = {}
    for file_name in tokenization_files:
        search = _re_tokenizer_file.search(file_name)
        if search is not None:
            v = search.groups()[0]
            tokenizer_files_map[v] = file_name
    available_versions = sorted(tokenizer_files_map.keys())

    # Defaults to FULL_TOKENIZER_FILE and then tries to look at some newer versions.
    tokenizer_file = FULL_TOKENIZER_FILE
    transformers_version = version.parse(__version__)
    for v in available_versions:
        if version.parse(v) <= transformers_version:
            tokenizer_file = tokenizer_files_map[v]
        else:
            # No point going further since the versions are sorted.
            break

    return tokenizer_file


# To update the docstring, we need to copy the method, otherwise we change the original docstring.
PreTrainedTokenizerBase.push_to_hub = copy_func(PreTrainedTokenizerBase.push_to_hub)
if PreTrainedTokenizerBase.push_to_hub.__doc__ is not None:
    PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format(
        object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files"
    )
transformers/src/transformers/tokenization_utils_base.py/0
{ "file_path": "transformers/src/transformers/tokenization_utils_base.py", "repo_id": "transformers", "token_count": 90322 }
155
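# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the dataset record above,
# showing how the `PreTrainedTokenizerBase` methods from the preceding file
# fit together. It assumes the `transformers` library is installed and that
# the "bert-base-uncased" checkpoint is reachable; both are assumptions, not
# part of the original file.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# Token ids "can be obtained from a string by chaining the `tokenize` and
# `convert_tokens_to_ids` methods", as the docstrings above put it.
ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello, my dog is cute"))
pair_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("He likes to nap all day"))

# `truncate_sequences` removes `num_tokens_to_remove` tokens following the
# chosen strategy and also returns the overflowing tokens.
ids, pair_ids, overflowing = tokenizer.truncate_sequences(
    ids, pair_ids=pair_ids, num_tokens_to_remove=2, truncation_strategy="only_second"
)

# `prepare_for_model` adds special tokens, truncates/pads via `pad`/`_pad`,
# and builds the final `BatchEncoding`; `decode` inverts the whole pipeline.
encoded = tokenizer.prepare_for_model(ids, pair_ids=pair_ids, padding="max_length", max_length=32)
print(tokenizer.decode(encoded["input_ids"], skip_special_tokens=True))
# ---------------------------------------------------------------------------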
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Doc utilities: Utilities related to documentation
"""

import functools
import re
import types


def add_start_docstrings(*docstr):
    def docstring_decorator(fn):
        fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
        return fn

    return docstring_decorator


def add_start_docstrings_to_model_forward(*docstr):
    def docstring_decorator(fn):
        docstring = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
        class_name = f"[`{fn.__qualname__.split('.')[0]}`]"
        intro = f"    The {class_name} forward method overrides the `__call__` special method."
        note = r"""

    <Tip>

    Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`]
    instance afterwards instead of this since the former takes care of running the pre and post processing steps while
    the latter silently ignores them.

    </Tip>
"""

        fn.__doc__ = intro + note + docstring
        return fn

    return docstring_decorator


def add_end_docstrings(*docstr):
    def docstring_decorator(fn):
        fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr)
        return fn

    return docstring_decorator


PT_RETURN_INTRODUCTION = r"""
    Returns:
        [`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of
        `torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various
        elements depending on the configuration ([`{config_class}`]) and inputs.

"""


TF_RETURN_INTRODUCTION = r"""
    Returns:
        [`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if
        `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the
        configuration ([`{config_class}`]) and inputs.

"""


def _get_indent(t):
    """Returns the indentation in the first line of t"""
    search = re.search(r"^(\s*)\S", t)
    return "" if search is None else search.groups()[0]


def _convert_output_args_doc(output_args_doc):
    """Convert output_args_doc to display properly."""
    # Split output_arg_doc in blocks argument/description
    indent = _get_indent(output_args_doc)
    blocks = []
    current_block = ""
    for line in output_args_doc.split("\n"):
        # If the indent is the same as the beginning, the line is the name of a new argument.
        if _get_indent(line) == indent:
            if len(current_block) > 0:
                blocks.append(current_block[:-1])
            current_block = f"{line}\n"
        else:
            # Otherwise it's part of the description of the current arg.
            # We need to remove 2 spaces from the indentation.
            current_block += f"{line[2:]}\n"
    blocks.append(current_block[:-1])

    # Format each block for proper rendering
    for i in range(len(blocks)):
        blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i])
        blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i])

    return "\n".join(blocks)


def _prepare_output_docstrings(output_type, config_class, min_indent=None):
    """
    Prepares the return part of the docstring using `output_type`.
""" output_docstring = output_type.__doc__ # Remove the head of the docstring to keep the list of args only lines = output_docstring.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: i += 1 if i < len(lines): params_docstring = "\n".join(lines[(i + 1) :]) params_docstring = _convert_output_args_doc(params_docstring) else: raise ValueError( f"No `Args` or `Parameters` section is found in the docstring of `{output_type.__name__}`. Make sure it has " "docstring and contain either `Args` or `Parameters`." ) # Add the return introduction full_output_type = f"{output_type.__module__}.{output_type.__name__}" intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION intro = intro.format(full_output_type=full_output_type, config_class=config_class) result = intro + params_docstring # Apply minimum indent if necessary if min_indent is not None: lines = result.split("\n") # Find the indent of the first nonempty line i = 0 while len(lines[i]) == 0: i += 1 indent = len(_get_indent(lines[i])) # If too small, add indentation to all nonempty lines if indent < min_indent: to_add = " " * (min_indent - indent) lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines] result = "\n".join(lines) return result FAKE_MODEL_DISCLAIMER = """ <Tip warning={true}> This example uses a random model as the real ones are all very big. To get proper results, you should use {real_checkpoint} instead of {fake_checkpoint}. If you get out-of-memory when loading that checkpoint, you can try adding `device_map="auto"` in the `from_pretrained` call. </Tip> """ PT_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer( ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt" ... ) >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_token_class_ids = logits.argmax(-1) >>> # Note that tokens are classified rather then input words which means that >>> # there might be more predicted token classes than words. >>> # Multiple token classes might account for the same word >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]] >>> predicted_tokens_classes {expected_output} >>> labels = predicted_token_class_ids >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="pt") >>> with torch.no_grad(): ... 
    ...     outputs = model(**inputs)

    >>> answer_start_index = outputs.start_logits.argmax()
    >>> answer_end_index = outputs.end_logits.argmax()

    >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
    >>> tokenizer.decode(predict_answer_tokens, skip_special_tokens=True)
    {expected_output}

    >>> # target is "nice puppet"
    >>> target_start_index = torch.tensor([{qa_target_start_index}])
    >>> target_end_index = torch.tensor([{qa_target_end_index}])

    >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
    >>> loss = outputs.loss
    >>> round(loss.item(), 2)
    {expected_loss}
    ```
"""

PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
    Example of single-label classification:

    ```python
    >>> import torch
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")

    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> predicted_class_id = logits.argmax().item()
    >>> model.config.id2label[predicted_class_id]
    {expected_output}

    >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
    >>> num_labels = len(model.config.id2label)
    >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels)

    >>> labels = torch.tensor([1])
    >>> loss = model(**inputs, labels=labels).loss
    >>> round(loss.item(), 2)
    {expected_loss}
    ```

    Example of multi-label classification:

    ```python
    >>> import torch
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")

    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> predicted_class_ids = torch.arange(0, logits.shape[-1])[torch.sigmoid(logits).squeeze(dim=0) > 0.5]

    >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
    >>> num_labels = len(model.config.id2label)
    >>> model = {model_class}.from_pretrained(
    ...     "{checkpoint}", num_labels=num_labels, problem_type="multi_label_classification"
    ... )

    >>> labels = torch.sum(
    ...     torch.nn.functional.one_hot(predicted_class_ids[None, :].clone(), num_classes=num_labels), dim=1
    ... ).to(torch.float)
    >>> loss = model(**inputs, labels=labels).loss
    ```
"""

PT_MASKED_LM_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import torch

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt")

    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> # retrieve index of {mask}
    >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]

    >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
    >>> tokenizer.decode(predicted_token_id)
    {expected_output}

    >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
    >>> # mask labels of non-{mask} tokens
    >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)

    >>> outputs = model(**inputs, labels=labels)
    >>> round(outputs.loss.item(), 2)
    {expected_loss}
    ```
"""

PT_BASE_MODEL_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import torch

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    >>> outputs = model(**inputs)

    >>> last_hidden_states = outputs.last_hidden_state
    ```
"""

PT_MULTIPLE_CHOICE_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import torch

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
    >>> choice0 = "It is eaten with a fork and a knife."
    >>> choice1 = "It is eaten while held in the hand."
    >>> labels = torch.tensor(0).unsqueeze(0)  # choice0 is correct (according to Wikipedia ;)), batch size 1

    >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
    >>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels)  # batch size is 1

    >>> # the linear classifier still needs to be trained
    >>> loss = outputs.loss
    >>> logits = outputs.logits
    ```
"""

PT_CAUSAL_LM_SAMPLE = r"""
    Example:

    ```python
    >>> import torch
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    >>> outputs = model(**inputs, labels=inputs["input_ids"])
    >>> loss = outputs.loss
    >>> logits = outputs.logits
    ```
"""

PT_SPEECH_BASE_MODEL_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoProcessor, {model_class}
    >>> import torch
    >>> from datasets import load_dataset

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> processor = AutoProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
    >>> with torch.no_grad():
    ...     outputs = model(**inputs)

    >>> last_hidden_states = outputs.last_hidden_state
    >>> list(last_hidden_states.shape)
    {expected_output}
    ```
"""

PT_SPEECH_CTC_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoProcessor, {model_class}
    >>> from datasets import load_dataset
    >>> import torch

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> processor = AutoProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits
    >>> predicted_ids = torch.argmax(logits, dim=-1)

    >>> # transcribe speech
    >>> transcription = processor.batch_decode(predicted_ids)
    >>> transcription[0]
    {expected_output}

    >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="pt").input_ids

    >>> # compute loss
    >>> loss = model(**inputs).loss
    >>> round(loss.item(), 2)
    {expected_loss}
    ```
"""

PT_SPEECH_SEQ_CLASS_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoFeatureExtractor, {model_class}
    >>> from datasets import load_dataset
    >>> import torch

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")

    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> predicted_class_ids = torch.argmax(logits, dim=-1).item()
    >>> predicted_label = model.config.id2label[predicted_class_ids]
    >>> predicted_label
    {expected_output}

    >>> # compute loss - target_label is e.g. "down"
    >>> target_label = model.config.id2label[0]
    >>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]])
    >>> loss = model(**inputs).loss
    >>> round(loss.item(), 2)
    {expected_loss}
    ```
"""

PT_SPEECH_FRAME_CLASS_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoFeatureExtractor, {model_class}
    >>> from datasets import load_dataset
    >>> import torch

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate)
    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> probabilities = torch.sigmoid(logits[0])
    >>> # labels is a one-hot array of shape (num_frames, num_speakers)
    >>> labels = (probabilities > 0.5).long()
    >>> labels[0].tolist()
    {expected_output}
    ```
"""

PT_SPEECH_XVECTOR_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoFeatureExtractor, {model_class}
    >>> from datasets import load_dataset
    >>> import torch

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = feature_extractor(
    ...     [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True
    ... )
    >>> with torch.no_grad():
    ...     embeddings = model(**inputs).embeddings

    >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu()

    >>> # the resulting embeddings can be used for cosine similarity-based retrieval
    >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1)
    >>> similarity = cosine_sim(embeddings[0], embeddings[1])
    >>> threshold = 0.7  # the optimal threshold is dataset-dependent
    >>> if similarity < threshold:
    ...     print("Speakers are not the same!")
    >>> round(similarity.item(), 2)
    {expected_output}
    ```
"""

PT_VISION_BASE_MODEL_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoImageProcessor, {model_class}
    >>> import torch
    >>> from datasets import load_dataset

    >>> dataset = load_dataset("huggingface/cats-image", trust_remote_code=True)
    >>> image = dataset["test"]["image"][0]

    >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = image_processor(image, return_tensors="pt")

    >>> with torch.no_grad():
    ...     outputs = model(**inputs)

    >>> last_hidden_states = outputs.last_hidden_state
    >>> list(last_hidden_states.shape)
    {expected_output}
    ```
"""

PT_VISION_SEQ_CLASS_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoImageProcessor, {model_class}
    >>> import torch
    >>> from datasets import load_dataset

    >>> dataset = load_dataset("huggingface/cats-image", trust_remote_code=True)
    >>> image = dataset["test"]["image"][0]

    >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = image_processor(image, return_tensors="pt")

    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> # model predicts one of the 1000 ImageNet classes
    >>> predicted_label = logits.argmax(-1).item()
    >>> print(model.config.id2label[predicted_label])
    {expected_output}
    ```
"""


PT_SAMPLE_DOCSTRINGS = {
    "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE,
    "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE,
    "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE,
    "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE,
    "MaskedLM": PT_MASKED_LM_SAMPLE,
    "LMHead": PT_CAUSAL_LM_SAMPLE,
    "BaseModel": PT_BASE_MODEL_SAMPLE,
    "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE,
    "CTC": PT_SPEECH_CTC_SAMPLE,
    "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE,
    "AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE,
    "AudioXVector": PT_SPEECH_XVECTOR_SAMPLE,
    "VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE,
    "ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE,
}


TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer(
    ...     "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="tf"
    ... )

    >>> logits = model(**inputs).logits
    >>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1)

    >>> # Note that tokens are classified rather than input words which means that
    >>> # there might be more predicted token classes than words.
    >>> # Multiple token classes might account for the same word
    >>> predicted_tokens_classes = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()]
    >>> predicted_tokens_classes
    {expected_output}
    ```

    ```python
    >>> labels = predicted_token_class_ids
    >>> loss = tf.math.reduce_mean(model(**inputs, labels=labels).loss)
    >>> round(float(loss), 2)
    {expected_loss}
    ```
"""

TF_QUESTION_ANSWERING_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"

    >>> inputs = tokenizer(question, text, return_tensors="tf")
    >>> outputs = model(**inputs)

    >>> answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
    >>> answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])

    >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
    >>> tokenizer.decode(predict_answer_tokens)
    {expected_output}
    ```

    ```python
    >>> # target is "nice puppet"
    >>> target_start_index = tf.constant([{qa_target_start_index}])
    >>> target_end_index = tf.constant([{qa_target_end_index}])

    >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
    >>> loss = tf.math.reduce_mean(outputs.loss)
    >>> round(float(loss), 2)
    {expected_loss}
    ```
"""

TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")

    >>> logits = model(**inputs).logits

    >>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0])
    >>> model.config.id2label[predicted_class_id]
    {expected_output}
    ```

    ```python
    >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
    >>> num_labels = len(model.config.id2label)
    >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels)

    >>> labels = tf.constant(1)
    >>> loss = model(**inputs, labels=labels).loss
    >>> round(float(loss), 2)
    {expected_loss}
    ```
"""

TF_MASKED_LM_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf")
    >>> logits = model(**inputs).logits

    >>> # retrieve index of {mask}
    >>> mask_token_index = tf.where((inputs.input_ids == tokenizer.mask_token_id)[0])
    >>> selected_logits = tf.gather_nd(logits[0], indices=mask_token_index)

    >>> predicted_token_id = tf.math.argmax(selected_logits, axis=-1)
    >>> tokenizer.decode(predicted_token_id)
    {expected_output}
    ```

    ```python
    >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"]
    >>> # mask labels of non-{mask} tokens
    >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)

    >>> outputs = model(**inputs, labels=labels)
    >>> round(float(outputs.loss), 2)
    {expected_loss}
    ```
"""

TF_BASE_MODEL_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
    >>> outputs = model(inputs)

    >>> last_hidden_states = outputs.last_hidden_state
    ```
"""

TF_MULTIPLE_CHOICE_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
    >>> choice0 = "It is eaten with a fork and a knife."
    >>> choice1 = "It is eaten while held in the hand."
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True) >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}} >>> outputs = model(inputs) # batch size is 1 >>> # the linear classifier still needs to be trained >>> logits = outputs.logits ``` """ TF_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> logits = outputs.logits ``` """ TF_SPEECH_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True) >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ TF_SPEECH_CTC_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> import tensorflow as tf >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True) >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") >>> logits = model(**inputs).logits >>> predicted_ids = tf.math.argmax(logits, axis=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] {expected_output} ``` ```python >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="tf").input_ids >>> # compute loss >>> loss = model(**inputs).loss >>> round(float(loss), 2) {expected_loss} ``` """ TF_VISION_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image", trust_remote_code=True) >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ TF_VISION_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import tensorflow as tf >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image", trust_remote_code=True) >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") 
>>> inputs = image_processor(image, return_tensors="tf") >>> logits = model(**inputs).logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = int(tf.math.argmax(logits, axis=-1)) >>> print(model.config.id2label[predicted_label]) {expected_output} ``` """ TF_SAMPLE_DOCSTRINGS = { "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE, "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": TF_MASKED_LM_SAMPLE, "LMHead": TF_CAUSAL_LM_SAMPLE, "BaseModel": TF_BASE_MODEL_SAMPLE, "SpeechBaseModel": TF_SPEECH_BASE_MODEL_SAMPLE, "CTC": TF_SPEECH_CTC_SAMPLE, "VisionBaseModel": TF_VISION_BASE_MODEL_SAMPLE, "ImageClassification": TF_VISION_SEQ_CLASS_SAMPLE, } FLAX_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="jax") >>> outputs = model(**inputs) >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits ``` """ FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ FLAX_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." 
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True) >>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}}) >>> logits = outputs.logits ``` """ FLAX_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np") >>> outputs = model(**inputs) >>> # retrieve logts for next token >>> next_token_logits = outputs.logits[:, -1] ``` """ FLAX_SAMPLE_DOCSTRINGS = { "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE, "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": FLAX_MASKED_LM_SAMPLE, "BaseModel": FLAX_BASE_MODEL_SAMPLE, "LMHead": FLAX_CAUSAL_LM_SAMPLE, } def filter_outputs_from_example(docstring, **kwargs): """ Removes the lines testing an output with the doctest syntax in a code sample when it's set to `None`. """ for key, value in kwargs.items(): if value is not None: continue doc_key = "{" + key + "}" docstring = re.sub(rf"\n([^\n]+)\n\s+{doc_key}\n", "\n", docstring) return docstring def add_code_sample_docstrings( *docstr, processor_class=None, checkpoint=None, output_type=None, config_class=None, mask="[MASK]", qa_target_start_index=14, qa_target_end_index=15, model_cls=None, modality=None, expected_output=None, expected_loss=None, real_checkpoint=None, revision=None, ): def docstring_decorator(fn): # model_class defaults to function's class if not specified otherwise model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls if model_class[:2] == "TF": sample_docstrings = TF_SAMPLE_DOCSTRINGS elif model_class[:4] == "Flax": sample_docstrings = FLAX_SAMPLE_DOCSTRINGS else: sample_docstrings = PT_SAMPLE_DOCSTRINGS # putting all kwargs for docstrings in a dict to be used # with the `.format(**doc_kwargs)`. Note that string might # be formatted with non-existing keys, which is fine. doc_kwargs = { "model_class": model_class, "processor_class": processor_class, "checkpoint": checkpoint, "mask": mask, "qa_target_start_index": qa_target_start_index, "qa_target_end_index": qa_target_end_index, "expected_output": expected_output, "expected_loss": expected_loss, "real_checkpoint": real_checkpoint, "fake_checkpoint": checkpoint, "true": "{true}", # For <Tip warning={true}> syntax that conflicts with formatting. 
        }

        if ("SequenceClassification" in model_class or "AudioClassification" in model_class) and modality == "audio":
            code_sample = sample_docstrings["AudioClassification"]
        elif "SequenceClassification" in model_class:
            code_sample = sample_docstrings["SequenceClassification"]
        elif "QuestionAnswering" in model_class:
            code_sample = sample_docstrings["QuestionAnswering"]
        elif "TokenClassification" in model_class:
            code_sample = sample_docstrings["TokenClassification"]
        elif "MultipleChoice" in model_class:
            code_sample = sample_docstrings["MultipleChoice"]
        elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]:
            code_sample = sample_docstrings["MaskedLM"]
        elif "LMHead" in model_class or "CausalLM" in model_class:
            code_sample = sample_docstrings["LMHead"]
        elif "CTC" in model_class:
            code_sample = sample_docstrings["CTC"]
        elif "AudioFrameClassification" in model_class:
            code_sample = sample_docstrings["AudioFrameClassification"]
        elif "XVector" in model_class and modality == "audio":
            code_sample = sample_docstrings["AudioXVector"]
        elif "Model" in model_class and modality == "audio":
            code_sample = sample_docstrings["SpeechBaseModel"]
        elif "Model" in model_class and modality == "vision":
            code_sample = sample_docstrings["VisionBaseModel"]
        elif "Model" in model_class or "Encoder" in model_class:
            code_sample = sample_docstrings["BaseModel"]
        elif "ImageClassification" in model_class:
            code_sample = sample_docstrings["ImageClassification"]
        else:
            raise ValueError(f"Docstring can't be built for model {model_class}")

        code_sample = filter_outputs_from_example(
            code_sample, expected_output=expected_output, expected_loss=expected_loss
        )
        if real_checkpoint is not None:
            code_sample = FAKE_MODEL_DISCLAIMER + code_sample
        func_doc = (fn.__doc__ or "") + "".join(docstr)
        output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class)
        built_doc = code_sample.format(**doc_kwargs)
        if revision is not None:
            if not re.match(r"^refs/pr/\d+", revision):
                raise ValueError(
                    f"The provided revision '{revision}' is incorrect. It should point to"
                    " a pull request reference on the hub like 'refs/pr/6'"
                )
            built_doc = built_doc.replace(
                f'from_pretrained("{checkpoint}")', f'from_pretrained("{checkpoint}", revision="{revision}")'
            )
        fn.__doc__ = func_doc + output_doc + built_doc
        return fn

    return docstring_decorator


def replace_return_docstrings(output_type=None, config_class=None):
    def docstring_decorator(fn):
        func_doc = fn.__doc__
        lines = func_doc.split("\n")
        i = 0
        while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None:
            i += 1
        if i < len(lines):
            indent = len(_get_indent(lines[i]))
            lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent)
            func_doc = "\n".join(lines)
        else:
            raise ValueError(
                f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, "
                f"current docstring is:\n{func_doc}"
            )
        fn.__doc__ = func_doc
        return fn

    return docstring_decorator


def copy_func(f):
    """Returns a copy of a function f."""
    # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)
    g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__)
    g = functools.update_wrapper(g, f)
    g.__kwdefaults__ = f.__kwdefaults__
    return g
transformers/src/transformers/utils/doc.py/0
{ "file_path": "transformers/src/transformers/utils/doc.py", "repo_id": "transformers", "token_count": 15616 }
156
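# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the dataset record above,
# of how the docstring decorators from `doc.py` are typically applied. The
# `ToyModel` class and its docstring text are assumptions invented for the
# demo; only the decorator names come from the file itself.
from transformers.utils.doc import add_end_docstrings, add_start_docstrings

TOY_NOTE = r"""
    Note:
        This text is appended by `add_end_docstrings`.
"""


@add_start_docstrings("A toy class used to demonstrate the decorators.")
class ToyModel:
    @add_end_docstrings(TOY_NOTE)
    def forward(self, input_ids):
        """Runs the (pretend) forward pass."""
        return input_ids


# The decorators simply concatenate the given strings onto `__doc__`:
print(ToyModel.__doc__)
print(ToyModel.forward.__doc__)
# ---------------------------------------------------------------------------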
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


class ImageProcessingMixin(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class BaseImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class ImageFeatureExtractionMixin(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class AriaImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class BeitFeatureExtractor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class BeitImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class BitImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class BlipImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class BridgeTowerImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class ChameleonImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class ChineseCLIPFeatureExtractor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class ChineseCLIPImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class CLIPFeatureExtractor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class CLIPImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class ConditionalDetrFeatureExtractor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class ConditionalDetrImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class ConvNextFeatureExtractor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class ConvNextImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class DeformableDetrFeatureExtractor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class DeformableDetrImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class DeiTFeatureExtractor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class DeiTImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class DetaImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
["vision"]) class EfficientFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class TvltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTHybridImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DonutFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DonutImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class EfficientNetImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Emu3ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FuyuImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FuyuProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GroundingDinoImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class IdeficsImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Idefics2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Idefics3ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class 
InstructBlipVideoImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LlavaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LlavaNextImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LlavaNextVideoImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LlavaOnevisionImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LlavaOnevisionVideoProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Mask2FormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MaskFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MaskFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MllamaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV1FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV1ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class NougatImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OneFormerImageProcessor(metaclass=DummyObject): _backends = 
["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Owlv2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Pix2StructImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PixtralImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PvtImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Qwen2_5_VLImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Qwen2VLImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class RTDetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SamImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegGptImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SiglipImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SuperGlueImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SuperPointImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Swin2SRImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class TextNetImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class TvpImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class 
VideoLlavaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VideoMAEFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VideoMAEImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VitMatteImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VitPoseImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VivitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ZoeDepthImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"])
transformers/src/transformers/utils/dummy_vision_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_vision_objects.py", "repo_id": "transformers", "token_count": 7145 }
157
{ "modelname": "BrandNewBERT", "uppercase_modelname": "BRAND_NEW_BERT", "lowercase_modelname": "brand_new_bert", "camelcase_modelname": "BrandNewBert", "has_slow_class": ["True", "False"], "has_fast_class": ["True", "False"], "slow_tokenizer_use_sentencepiece": ["True", "False"], "authors": "The HuggingFace Team" }
transformers/templates/adding_a_missing_tokenization_test/cookiecutter.json/0
{ "file_path": "transformers/templates/adding_a_missing_tokenization_test/cookiecutter.json", "repo_id": "transformers", "token_count": 129 }
158
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import pytest from transformers import load_tool from transformers.agents.agent_types import AGENT_TYPE_MAPPING from transformers.agents.default_tools import BASE_PYTHON_TOOLS from transformers.agents.python_interpreter import InterpreterError, evaluate_python_code from .test_tools_common import ToolTesterMixin # Fake function we will use as tool def add_two(x): return x + 2 class PythonInterpreterToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("python_interpreter", authorized_imports=["sqlite3"]) self.tool.setup() def test_exact_match_arg(self): result = self.tool("(2 / 2) * 4") self.assertEqual(result, "4.0") def test_exact_match_kwarg(self): result = self.tool(code="(2 / 2) * 4") self.assertEqual(result, "4.0") def test_agent_type_output(self): inputs = ["2 * 2"] output = self.tool(*inputs) output_type = AGENT_TYPE_MAPPING[self.tool.output_type] self.assertTrue(isinstance(output, output_type)) def test_agent_types_inputs(self): inputs = ["2 * 2"] _inputs = [] for _input, expected_input in zip(inputs, self.tool.inputs.values()): input_type = expected_input["type"] if isinstance(input_type, list): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type]) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input)) # Should not raise an error output = self.tool(*inputs) output_type = AGENT_TYPE_MAPPING[self.tool.output_type] self.assertTrue(isinstance(output, output_type)) class PythonInterpreterTester(unittest.TestCase): def test_evaluate_assign(self): code = "x = 3" state = {} result = evaluate_python_code(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3, "print_outputs": ""}) code = "x = y" state = {"y": 5} result = evaluate_python_code(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(state, {"x": 5, "y": 5, "print_outputs": ""}) code = "a=1;b=None" result = evaluate_python_code(code, {}, state={}) # evaluate returns the value of the last assignment. assert result is None def test_assignment_cannot_overwrite_tool(self): code = "print = '3'" with pytest.raises(InterpreterError) as e: evaluate_python_code(code, {"print": print}, state={}) assert "Cannot assign to name 'print': doing this would erase the existing tool!" 
in str(e) def test_evaluate_call(self): code = "y = add_two(x)" state = {"x": 3} result = evaluate_python_code(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "y": 5, "print_outputs": ""}) # Should not work without the tool with pytest.raises(InterpreterError) as e: evaluate_python_code(code, {}, state=state) assert "tried to execute add_two" in str(e.value) def test_evaluate_constant(self): code = "x = 3" state = {} result = evaluate_python_code(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3, "print_outputs": ""}) def test_evaluate_dict(self): code = "test_dict = {'x': x, 'y': add_two(x)}" state = {"x": 3} result = evaluate_python_code(code, {"add_two": add_two}, state=state) self.assertDictEqual(result, {"x": 3, "y": 5}) self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}, "print_outputs": ""}) def test_evaluate_expression(self): code = "x = 3\ny = 5" state = {} result = evaluate_python_code(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(state, {"x": 3, "y": 5, "print_outputs": ""}) def test_evaluate_f_string(self): code = "text = f'This is x: {x}.'" state = {"x": 3} result = evaluate_python_code(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(state, {"x": 3, "text": "This is x: 3.", "print_outputs": ""}) def test_evaluate_if(self): code = "if x <= 3:\n y = 2\nelse:\n y = 5" state = {"x": 3} result = evaluate_python_code(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(state, {"x": 3, "y": 2, "print_outputs": ""}) state = {"x": 8} result = evaluate_python_code(code, {}, state=state) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(state, {"x": 8, "y": 5, "print_outputs": ""}) def test_evaluate_list(self): code = "test_list = [x, add_two(x)]" state = {"x": 3} result = evaluate_python_code(code, {"add_two": add_two}, state=state) self.assertListEqual(result, [3, 5]) self.assertDictEqual(state, {"x": 3, "test_list": [3, 5], "print_outputs": ""}) def test_evaluate_name(self): code = "y = x" state = {"x": 3} result = evaluate_python_code(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3, "y": 3, "print_outputs": ""}) def test_evaluate_subscript(self): code = "test_list = [x, add_two(x)]\ntest_list[1]" state = {"x": 3} result = evaluate_python_code(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "test_list": [3, 5], "print_outputs": ""}) code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" state = {"x": 3} result = evaluate_python_code(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}, "print_outputs": ""}) code = "vendor = {'revenue': 31000, 'rent': 50312}; vendor['ratio'] = round(vendor['revenue'] / vendor['rent'], 2)" state = {} evaluate_python_code(code, {"min": min, "print": print, "round": round}, state=state) assert state["vendor"] == {"revenue": 31000, "rent": 50312, "ratio": 0.62} def test_subscript_string_with_string_index_raises_appropriate_error(self): code = """ search_results = "[{'title': 'Paris, Ville de Paris, France Weather Forecast | AccuWeather', 'href': 'https://www.accuweather.com/en/fr/paris/623/weather-forecast/623', 'body': 'Get the latest weather forecast for Paris, Ville de Paris, France , including hourly, daily, and 10-day outlooks. AccuWeather provides you with reliable and accurate information on temperature ...'}]" for result in search_results: if 'current' in result['title'].lower() or 'temperature' in result['title'].lower(): current_weather_url = result['href'] print(current_weather_url) break""" with pytest.raises(InterpreterError) as e: evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert "You're trying to subscript a string with a string index" in e def test_evaluate_for(self): code = "x = 0\nfor i in range(3):\n x = i" state = {} result = evaluate_python_code(code, {"range": range}, state=state) assert result == 2 self.assertDictEqual(state, {"x": 2, "i": 2, "print_outputs": ""}) def test_evaluate_binop(self): code = "y + x" state = {"x": 3, "y": 6} result = evaluate_python_code(code, {}, state=state) assert result == 9 self.assertDictEqual(state, {"x": 3, "y": 6, "print_outputs": ""}) def test_recursive_function(self): code = """ def recur_fibo(n): if n <= 1: return n else: return(recur_fibo(n-1) + recur_fibo(n-2)) recur_fibo(6)""" result = evaluate_python_code(code, {}, state={}) assert result == 8 def test_evaluate_string_methods(self): code = "'hello'.replace('h', 'o').split('e')" result = evaluate_python_code(code, {}, state={}) assert result == ["o", "llo"] def test_evaluate_slicing(self): code = "'hello'[1:3][::-1]" result = evaluate_python_code(code, {}, state={}) assert result == "le" def test_access_attributes(self): code = "integer = 1\nobj_class = integer.__class__\nobj_class" result = evaluate_python_code(code, {}, state={}) assert result is int def test_list_comprehension(self): code = "sentence = 'THESEAGULL43'\nmeaningful_sentence = '-'.join([char.lower() for char in sentence if char.isalpha()])" result = evaluate_python_code(code, {}, state={}) assert 
result == "t-h-e-s-e-a-g-u-l-l" def test_string_indexing(self): code = """text_block = [ "THESE", "AGULL" ] sentence = "" for block in text_block: for col in range(len(text_block[0])): sentence += block[col] """ result = evaluate_python_code(code, {"len": len, "range": range}, state={}) assert result == "THESEAGULL" def test_tuples(self): code = "x = (1, 2, 3)\nx[1]" result = evaluate_python_code(code, {}, state={}) assert result == 2 code = """ digits, i = [1, 2, 3], 1 digits[i], digits[i + 1] = digits[i + 1], digits[i]""" evaluate_python_code(code, {"range": range, "print": print, "int": int}, {}) code = """ def calculate_isbn_10_check_digit(number): total = sum((10 - i) * int(digit) for i, digit in enumerate(number)) remainder = total % 11 check_digit = 11 - remainder if check_digit == 10: return 'X' elif check_digit == 11: return '0' else: return str(check_digit) # Given 9-digit numbers numbers = [ "478225952", "643485613", "739394228", "291726859", "875262394", "542617795", "031810713", "957007669", "871467426" ] # Calculate check digits for each number check_digits = [calculate_isbn_10_check_digit(number) for number in numbers] print(check_digits) """ state = {} evaluate_python_code( code, {"range": range, "print": print, "sum": sum, "enumerate": enumerate, "int": int, "str": str}, state ) def test_listcomp(self): code = "x = [i for i in range(3)]" result = evaluate_python_code(code, {"range": range}, state={}) assert result == [0, 1, 2] def test_break_continue(self): code = "for i in range(10):\n if i == 5:\n break\ni" result = evaluate_python_code(code, {"range": range}, state={}) assert result == 5 code = "for i in range(10):\n if i == 5:\n continue\ni" result = evaluate_python_code(code, {"range": range}, state={}) assert result == 9 def test_call_int(self): code = "import math\nstr(math.ceil(149))" result = evaluate_python_code(code, {"str": lambda x: str(x)}, state={}) assert result == "149" def test_lambda(self): code = "f = lambda x: x + 2\nf(3)" result = evaluate_python_code(code, {}, state={}) assert result == 5 def test_dictcomp(self): code = "x = {i: i**2 for i in range(3)}" result = evaluate_python_code(code, {"range": range}, state={}) assert result == {0: 0, 1: 1, 2: 4} code = "{num: name for num, name in {101: 'a', 102: 'b'}.items() if name not in ['a']}" result = evaluate_python_code(code, {"print": print}, state={}, authorized_imports=["pandas"]) assert result == {102: "b"} code = """ shifts = {'A': ('6:45', '8:00'), 'B': ('10:00', '11:45')} shift_minutes = {worker: ('a', 'b') for worker, (start, end) in shifts.items()} """ result = evaluate_python_code(code, {}, state={}) assert result == {"A": ("a", "b"), "B": ("a", "b")} def test_tuple_assignment(self): code = "a, b = 0, 1\nb" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == 1 def test_while(self): code = "i = 0\nwhile i < 3:\n i += 1\ni" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == 3 # test infinite loop code = "i = 0\nwhile i < 3:\n i -= 1\ni" with pytest.raises(InterpreterError) as e: evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert "iterations in While loop exceeded" in str(e) # test lazy evaluation code = """ house_positions = [0, 7, 10, 15, 18, 22, 22] i, n, loc = 0, 7, 30 while i < n and house_positions[i] <= loc: i += 1 """ state = {} evaluate_python_code(code, BASE_PYTHON_TOOLS, state=state) def test_generator(self): code = "a = [1, 2, 3, 4, 5]; b = (i**2 for i in a); list(b)" result = evaluate_python_code(code, 
BASE_PYTHON_TOOLS, state={}) assert result == [1, 4, 9, 16, 25] def test_boolops(self): code = """if (not (a > b and a > c)) or d > e: best_city = "Brooklyn" else: best_city = "Manhattan" best_city """ result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}) assert result == "Brooklyn" code = """if d > e and a < b: best_city = "Brooklyn" elif d < e and a < b: best_city = "Sacramento" else: best_city = "Manhattan" best_city """ result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}) assert result == "Sacramento" def test_if_conditions(self): code = """char='a' if char.isalpha(): print('2')""" state = {} evaluate_python_code(code, BASE_PYTHON_TOOLS, state=state) assert state["print_outputs"] == "2\n" def test_imports(self): code = "import math\nmath.sqrt(4)" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == 2.0 code = "from random import choice, seed\nseed(12)\nchoice(['win', 'lose', 'draw'])" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == "lose" code = "import time, re\ntime.sleep(0.1)" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result is None code = "from queue import Queue\nq = Queue()\nq.put(1)\nq.get()" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == 1 code = "import itertools\nlist(itertools.islice(range(10), 3))" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == [0, 1, 2] code = "import re\nre.search('a', 'abc').group()" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == "a" code = "import stat\nstat.S_ISREG(0o100644)" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result code = "import statistics\nstatistics.mean([1, 2, 3, 4, 4])" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == 2.8 code = "import unicodedata\nunicodedata.name('A')" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == "LATIN CAPITAL LETTER A" # Test submodules are handled properly, thus not raising error code = "import numpy.random as rd\nrng = rd.default_rng(12345)\nrng.random()" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}, authorized_imports=["numpy"]) code = "from numpy.random import default_rng as d_rng\nrng = d_rng(12345)\nrng.random()" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}, authorized_imports=["numpy"]) def test_additional_imports(self): code = "import numpy as np" evaluate_python_code(code, authorized_imports=["numpy"], state={}) code = "import numpy.random as rd" evaluate_python_code(code, authorized_imports=["numpy.random"], state={}) evaluate_python_code(code, authorized_imports=["numpy"], state={}) with pytest.raises(InterpreterError): evaluate_python_code(code, authorized_imports=["random"], state={}) def test_multiple_comparators(self): code = "0 <= -1 < 4 and 0 <= -5 < 4" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert not result code = "0 <= 1 < 4 and 0 <= -5 < 4" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert not result code = "0 <= 4 < 4 and 0 <= 3 < 4" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert not result code = "0 <= 3 < 4 and 0 <= 3 < 4" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result def test_print_output(self): code = "print('Hello world!')\nprint('Ok no one cares')" state = {} 
result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state=state) assert result is None assert state["print_outputs"] == "Hello world!\nOk no one cares\n" # test print in function code = """ print("1") def function(): print("2") function()""" state = {} evaluate_python_code(code, {"print": print}, state=state) assert state["print_outputs"] == "1\n2\n" def test_tuple_target_in_iterator(self): code = "for a, b in [('Ralf Weikert', 'Austria'), ('Samuel Seungwon Lee', 'South Korea')]:res = a.split()[0]" result = evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert result == "Samuel" def test_classes(self): code = """ class Animal: species = "Generic Animal" def __init__(self, name, age): self.name = name self.age = age def sound(self): return "The animal makes a sound." def __str__(self): return f"{self.name}, {self.age} years old" class Dog(Animal): species = "Canine" def __init__(self, name, age, breed): super().__init__(name, age) self.breed = breed def sound(self): return "The dog barks." def __str__(self): return f"{self.name}, {self.age} years old, {self.breed}" class Cat(Animal): def sound(self): return "The cat meows." def __str__(self): return f"{self.name}, {self.age} years old, {self.species}" # Testing multiple instances dog1 = Dog("Fido", 3, "Labrador") dog2 = Dog("Buddy", 5, "Golden Retriever") # Testing method with built-in function animals = [dog1, dog2, Cat("Whiskers", 2)] num_animals = len(animals) # Testing exceptions in methods class ExceptionTest: def method_that_raises(self): raise ValueError("An error occurred") try: exc_test = ExceptionTest() exc_test.method_that_raises() except ValueError as e: exception_message = str(e) # Collecting results dog1_sound = dog1.sound() dog1_str = str(dog1) dog2_sound = dog2.sound() dog2_str = str(dog2) cat = Cat("Whiskers", 2) cat_sound = cat.sound() cat_str = str(cat) """ state = {} evaluate_python_code(code, {"print": print, "len": len, "super": super, "str": str, "sum": sum}, state=state) # Assert results assert state["dog1_sound"] == "The dog barks." assert state["dog1_str"] == "Fido, 3 years old, Labrador" assert state["dog2_sound"] == "The dog barks." assert state["dog2_str"] == "Buddy, 5 years old, Golden Retriever" assert state["cat_sound"] == "The cat meows." 
assert state["cat_str"] == "Whiskers, 2 years old, Generic Animal" assert state["num_animals"] == 3 assert state["exception_message"] == "An error occurred" def test_variable_args(self): code = """ def var_args_method(self, *args, **kwargs): return sum(args) + sum(kwargs.values()) var_args_method(1, 2, 3, x=4, y=5) """ state = {} result = evaluate_python_code(code, {"sum": sum}, state=state) assert result == 15 def test_exceptions(self): code = """ def method_that_raises(self): raise ValueError("An error occurred") try: method_that_raises() except ValueError as e: exception_message = str(e) """ state = {} evaluate_python_code(code, {"print": print, "len": len, "super": super, "str": str, "sum": sum}, state=state) assert state["exception_message"] == "An error occurred" def test_print(self): code = "print(min([1, 2, 3]))" state = {} evaluate_python_code(code, {"min": min, "print": print}, state=state) assert state["print_outputs"] == "1\n" def test_types_as_objects(self): code = "type_a = float(2); type_b = str; type_c = int" state = {} result = evaluate_python_code(code, {"float": float, "str": str, "int": int}, state=state) assert result is int def test_tuple_id(self): code = """ food_items = {"apple": 2, "banana": 3, "orange": 1, "pear": 1} unique_food_items = [item for item, count in food_item_counts.items() if count == 1] """ state = {} result = evaluate_python_code(code, {}, state=state) assert result == ["orange", "pear"] def test_nonsimple_augassign(self): code = """ counts_dict = {'a': 0} counts_dict['a'] += 1 counts_list = [1, 2, 3] counts_list += [4, 5, 6] class Counter: self.count = 0 a = Counter() a.count += 1 """ state = {} evaluate_python_code(code, {}, state=state) assert state["counts_dict"] == {"a": 1} assert state["counts_list"] == [1, 2, 3, 4, 5, 6] assert state["a"].count == 1 def test_adding_int_to_list_raises_error(self): code = """ counts = [1, 2, 3] counts += 1""" with pytest.raises(InterpreterError) as e: evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert "Cannot add non-list value 1 to a list." 
in str(e) def test_error_highlights_correct_line_of_code(self): code = """# Ok this is a very long code # It has many commented lines a = 1 b = 2 # Here is another piece counts = [1, 2, 3] counts += 1 b += 1""" with pytest.raises(InterpreterError) as e: evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert "Evaluation stopped at line 'counts += 1" in str(e) def test_assert(self): code = """ assert 1 == 1 assert 1 == 2 """ with pytest.raises(AssertionError) as e: evaluate_python_code(code, BASE_PYTHON_TOOLS, state={}) assert "1 == 2" in str(e) and "1 == 1" not in str(e) def test_with_context_manager(self): code = """ class SimpleLock: def __init__(self): self.locked = False def __enter__(self): self.locked = True return self def __exit__(self, exc_type, exc_value, traceback): self.locked = False lock = SimpleLock() with lock as l: assert l.locked == True assert lock.locked == False """ state = {} tools = {} evaluate_python_code(code, tools, state=state) def test_default_arg_in_function(self): code = """ def f(a, b=333, n=1000): return b + n n = f(1, n=667) """ res = evaluate_python_code(code, {}, {}) assert res == 1000 def test_set(self): code = """ S1 = {'a', 'b', 'c'} S2 = {'b', 'c', 'd'} S3 = S1.difference(S2) S4 = S1.intersection(S2) """ state = {} evaluate_python_code(code, {}, state=state) assert state["S3"] == {"a"} assert state["S4"] == {"b", "c"} def test_break(self): code = """ i = 0 while True: i+= 1 if i==3: break i""" result = evaluate_python_code(code, {"print": print, "round": round}, state={}) assert result == 3 def test_return(self): # test early returns code = """ def add_one(n, shift): if True: return n + shift return n add_one(1, 1) """ state = {} result = evaluate_python_code(code, {"print": print, "range": range, "ord": ord, "chr": chr}, state=state) assert result == 2 # test returning None code = """ def returns_none(a): return returns_none(1) """ state = {} result = evaluate_python_code(code, {"print": print, "range": range, "ord": ord, "chr": chr}, state=state) assert result is None def test_nested_for_loop(self): code = """ all_res = [] for i in range(10): subres = [] for j in range(i): subres.append(j) all_res.append(subres) out = [i for sublist in all_res for i in sublist] out[:10] """ state = {} result = evaluate_python_code(code, {"print": print, "range": range}, state=state) assert result == [0, 0, 1, 0, 1, 2, 0, 1, 2, 3] def test_pandas(self): code = """ import pandas as pd df = pd.DataFrame.from_dict({'SetCount': ['5', '4', '5'], 'Quantity': [1, 0, -1]}) df['SetCount'] = pd.to_numeric(df['SetCount'], errors='coerce') parts_with_5_set_count = df[df['SetCount'] == 5.0] parts_with_5_set_count[['Quantity', 'SetCount']].values[1] """ state = {} result = evaluate_python_code(code, {}, state=state, authorized_imports=["pandas"]) assert np.array_equal(result, [-1, 5]) code = """ import pandas as pd df = pd.DataFrame.from_dict({"AtomicNumber": [111, 104, 105], "ok": [0, 1, 2]}) print("HH0") # Filter the DataFrame to get only the rows with outdated atomic numbers filtered_df = df.loc[df['AtomicNumber'].isin([104])] """ result = evaluate_python_code(code, {"print": print}, state={}, authorized_imports=["pandas"]) assert np.array_equal(result.values[0], [104, 1]) code = """import pandas as pd data = pd.DataFrame.from_dict([ {"Pclass": 1, "Survived": 1}, {"Pclass": 2, "Survived": 0}, {"Pclass": 2, "Survived": 1} ]) survival_rate_by_class = data.groupby('Pclass')['Survived'].mean() """ result = evaluate_python_code(code, {}, state={}, 
authorized_imports=["pandas"]) assert result.values[1] == 0.5 def test_starred(self): code = """ from math import radians, sin, cos, sqrt, atan2 def haversine(lat1, lon1, lat2, lon2): R = 6371000 # Radius of the Earth in meters lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2]) dlat = lat2 - lat1 dlon = lon2 - lon1 a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2 c = 2 * atan2(sqrt(a), sqrt(1 - a)) distance = R * c return distance coords_geneva = (46.1978, 6.1342) coords_barcelona = (41.3869, 2.1660) distance_geneva_barcelona = haversine(*coords_geneva, *coords_barcelona) """ result = evaluate_python_code(code, {"print": print, "map": map}, state={}, authorized_imports=["math"]) assert round(result, 1) == 622395.4 def test_for(self): code = """ shifts = { "Worker A": ("6:45 pm", "8:00 pm"), "Worker B": ("10:00 am", "11:45 am") } shift_intervals = {} for worker, (start, end) in shifts.items(): shift_intervals[worker] = end shift_intervals """ result = evaluate_python_code(code, {"print": print, "map": map}, state={}) assert result == {"Worker A": "8:00 pm", "Worker B": "11:45 am"}
transformers/tests/agents/test_python_interpreter.py/0
{ "file_path": "transformers/tests/agents/test_python_interpreter.py", "repo_id": "transformers", "token_count": 12302 }
159
{ "feature_extractor_type": "Wav2Vec2FeatureExtractor", "processor_class": "Wav2Vec2Processor" }
transformers/tests/fixtures/dummy_feature_extractor_config.json/0
{ "file_path": "transformers/tests/fixtures/dummy_feature_extractor_config.json", "repo_id": "transformers", "token_count": 42 }
160
""" Framework agnostic tests for generate()-related methods. """ import numpy as np from transformers import AutoTokenizer from transformers.testing_utils import slow, torch_device class GenerationIntegrationTestsMixin: # To be populated by the child classes framework_dependent_parameters = { "AutoModelForCausalLM": None, "AutoModelForSpeechSeq2Seq": None, "AutoModelForSeq2SeqLM": None, "AutoModelForVision2Seq": None, "LogitsProcessorList": None, "MinLengthLogitsProcessor": None, "create_tensor_fn": None, "floats_tensor": None, "return_tensors": None, "set_seed": None, } def test_validate_generation_inputs(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-t5") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors=return_tensors).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(ValueError, "do_samples"): model.generate(input_ids, do_samples=True) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(ValueError, "foo"): fake_model_kwargs = {"foo": "bar"} model.generate(input_ids, **fake_model_kwargs) # however, valid model_kwargs are accepted valid_model_kwargs = {"attention_mask": create_tensor_fn(np.zeros_like(input_ids))} model.generate(input_ids, **valid_model_kwargs) def test_custom_logits_processor(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] logits_processor_list_cls = self.framework_dependent_parameters["LogitsProcessorList"] min_length_logits_processor_cls = self.framework_dependent_parameters["MinLengthLogitsProcessor"] return_tensors = self.framework_dependent_parameters["return_tensors"] bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", min_length=1) input_ids = bart_tokenizer(article, return_tensors=return_tensors).input_ids logits_processor = logits_processor_list_cls() logits_processor.append(min_length_logits_processor_cls(min_length=10, eos_token_id=0)) # it should not be allowed to both define `min_length` via config and `logits_processor` list with self.assertRaises(ValueError): bart_model.generate(input_ids, logits_processor=logits_processor) bart_model.config.min_length = None bart_model.generate(input_ids, logits_processor=logits_processor) def test_max_new_tokens_encoder_decoder(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart") input_ids = bart_tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: bart_model = bart_model.to(torch_device) input_ids = input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 29]) max_new_tokens = 3 bart_model.config.max_length = 20 
bart_model.config.eos_token_id = None # Encoder decoder call outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens) # 1 BOS + 3 new tokens self.assertEqual(list(outputs.shape), [1, 4]) # Decoder only call outputs = bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=max_new_tokens) # 1 BOS + 29 (input length) + 3 new tokens self.assertEqual(list(outputs.shape), [1, 33]) # Encoder decoder call > 20 outputs = bart_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS + 20 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 24]) def test_max_new_tokens_decoder_only(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """Justin Timberlake.""" gpt2_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") gpt2_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") input_ids = gpt2_tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: gpt2_model = gpt2_model.to(torch_device) input_ids = input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 9]) max_new_tokens = 3 gpt2_model.config.max_length = 20 # call < 20 outputs = gpt2_model.generate(input_ids, max_new_tokens=max_new_tokens) # 9 input_ids + 3 new tokens self.assertEqual(list(outputs.shape), [1, 12]) # call > 20 outputs = gpt2_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS token + 23 new tokens self.assertEqual(list(outputs.shape), [1, 24]) def test_encoder_decoder_generate_with_inputs_embeds(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5) model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors=return_tensors).input_ids inputs_embeds = model.get_input_embeddings()(input_ids) output_sequences = model.generate(inputs_embeds=inputs_embeds) # make sure model generated correctly until `max_length` self.assertEqual(output_sequences.shape, (1, 5)) def test_transition_scores_greedy_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = ["Justin Timberlake", "Michael Phelps"] tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = model_cls.from_pretrained("distilbert/distilgpt2") model.generation_config.eos_token_id = None input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids=input_ids, max_new_tokens=5, pad_token_id=tokenizer.eos_token_id, return_dict_in_generate=True, output_scores=True, ) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores) if is_pt: transition_scores = transition_scores.cpu().numpy() expected_scores = np.array( [ [-57.8844, -60.45698, -70.16364, -65.50791, -66.35648], [-54.417572, -60.216614, -62.661243, -58.621933, -58.298683], ] ) 
self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3)) def test_transition_scores_greedy_search_normalized(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = ["Justin Timberlake", "Michael Phelps"] tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = model_cls.from_pretrained("distilbert/distilgpt2") model.generation_config.eos_token_id = None input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids=input_ids, max_new_tokens=5, pad_token_id=tokenizer.eos_token_id, return_dict_in_generate=True, output_scores=True, ) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, normalize_logits=True) if is_pt: transition_scores = transition_scores.cpu().numpy() expected_scores = np.array( [ [-2.538938, -2.2694316, -2.1580915, -1.572299, -2.6719835], [-1.8826028, -2.2461371, -1.7556462, -2.9644494, -1.7996008], ] ) self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3)) def test_transition_scores_beam_search_encoder_decoder(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_search_encoder_decoder_with_eos(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = 
model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_search_decoder_only(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake", "Michael Phelps", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer.pad_token = tokenizer.eos_token model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-gpt2", max_length=10, num_beams=4, num_return_sequences=2, pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_sample_encoder_decoder(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", do_sample=True, max_length=10, num_beams=4, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) @slow def test_transition_scores_early_stopping(self): # This is an aggressive test that makes sure that `beam_search's` # transition scores are computed correctly for varying `num_return_sequences`, `num_beams` and `batch_size > 1` # 2 x input_ids for "question: How are you? 
\n context: I had a long day, " model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] is_pt = not model_cls.__name__.startswith("TF") input_ids = create_tensor_fn(2 * [[822, 10, 571, 33, 25, 58, 2625, 10, 27, 141, 3, 9, 307, 239, 6, 1]]) model = model_cls.from_pretrained("google-t5/t5-small") if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids, max_length=10, return_dict_in_generate=True, output_scores=True, forced_eos_token_id=model.config.eos_token_id, num_beams=4, do_sample=False, num_return_sequences=3, length_penalty=0.0, ) transition_scores = model.compute_transition_scores( sequences=outputs.sequences, scores=outputs.scores, beam_indices=outputs.beam_indices ) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores)) def test_encoder_decoder_generate_attention_mask(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = ["Timberlake", "Jessica Biel, welcome to parenthood among other things"] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") # need extreme generation values here to force this test # to fail when `attention_mask` is not correctly treated in generate model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=50, num_beams=5, num_return_sequences=5 ) model.config.eos_token_id = None input_ids = tokenizer(articles[0], return_tensors=return_tensors).input_ids input_ids_batched = tokenizer(articles, padding=True, return_tensors=return_tensors).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) input_ids_batched = input_ids_batched.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate(input_ids=input_ids, return_dict_in_generate=True, output_scores=True) batched_out = output_sequences_batched.sequences_scores out = output_sequences.sequences_scores if is_pt: batched_out = batched_out.cpu().numpy() out = out.cpu().numpy() diff = np.abs(np.sum(batched_out[:5]) - np.sum(out)) self.assertTrue(diff < 1e-4) def test_generate_input_ids_as_kwarg(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """I need input_ids to generate""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=15) input_ids = tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids) output_sequences = model.generate(input_ids) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (1, 15)) def test_generate_input_ids_as_encoder_kwarg(self): 
model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5) model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids) output_sequences = model.generate(input_ids) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (1, 5)) def test_generate_inputs_and_encoder_kwargs(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] article = """I need input_ids to generate""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10) input_ids = tokenizer(article, return_tensors=return_tensors).input_ids with self.assertRaises(ValueError): model.generate(input_ids, input_ids=input_ids) def test_generate_too_many_encoder_kwargs(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] article = """I need input_ids to generate""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=10) input_ids = tokenizer(article, return_tensors=return_tensors).input_ids with self.assertRaises(ValueError): model.generate(input_ids=input_ids, inputs_embeds=input_ids) def test_generate_input_features_as_encoder_kwarg(self): model_cls = self.framework_dependent_parameters["AutoModelForSpeechSeq2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] is_pt = not model_cls.__name__.startswith("TF") input_features = floats_tensor((3, 80, 60)) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-WhisperForConditionalGeneration") if is_pt: input_features.to(torch_device) model = model.to(torch_device) output_sequences_kwargs = model.generate(input_features=input_features, max_length=5) output_sequences = model.generate(input_features, max_length=5) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (3, 5)) def test_generate_pixel_values_as_encoder_kwarg(self): model_cls = self.framework_dependent_parameters["AutoModelForVision2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] is_pt = not model_cls.__name__.startswith("TF") pixel_values = floats_tensor((2, 3, 30, 30)) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2") model.generation_config.eos_token_id = None if is_pt: pixel_values = pixel_values.to(torch_device) model = model.to(torch_device) output_sequences_kwargs = 
model.generate(pixel_values=pixel_values, max_length=5) output_sequences = model.generate(pixel_values, max_length=5) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (2, 5)) def test_generate_encoder_outputs_attention_mask(self): model_cls = self.framework_dependent_parameters["AutoModelForSpeechSeq2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] is_pt = not model_cls.__name__.startswith("TF") input_features = floats_tensor((3, 80, 60)) attention_mask = create_tensor_fn(np.ones(input_features.shape)) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-WhisperForConditionalGeneration") if is_pt: input_features = input_features.to(torch_device) attention_mask = attention_mask.to(torch_device) model = model.to(torch_device) encoder = model.get_encoder() encoder_outputs = encoder(input_features) output_sequences_no_mask = model.generate(encoder_outputs=encoder_outputs) output_sequences_with_mask = model.generate(encoder_outputs=encoder_outputs, attention_mask=attention_mask) if is_pt: output_sequences_no_mask = output_sequences_no_mask.cpu().numpy() output_sequences_with_mask = output_sequences_with_mask.cpu().numpy() self.assertTrue(np.array_equal(output_sequences_no_mask, output_sequences_with_mask)) def test_eos_token_id_int_and_list_greedy_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") generation_kwargs = { "do_sample": False, "num_beams": 1, } expectation = 13 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors=return_tensors) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") if is_pt: model = model.to(torch_device) tokens = tokens.to(torch_device) eos_token_id = 873 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) eos_token_id = [873, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_eos_token_id_int_and_list_contrastive_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") generation_kwargs = { "do_sample": False, "num_beams": 1, "penalty_alpha": 0.6, "top_k": 4, } expectation = 17 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors=return_tensors) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") if is_pt: model = model.to(torch_device) tokens = tokens.to(torch_device) eos_token_id = 225 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) eos_token_id = [225, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def 
test_eos_token_id_int_and_list_beam_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") generation_kwargs = { "do_sample": False, "num_beams": 3, } expectation = 13 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors=return_tensors) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") if is_pt: model = model.to(torch_device) tokens = tokens.to(torch_device) eos_token_id = 873 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) unpadded_correct_condition = expectation == len(generated_tokens[0]) padded_correct_condition = expectation < len(generated_tokens[0]) and all( token == model.config.pad_token_id for token in generated_tokens[0][expectation:] ) self.assertTrue(unpadded_correct_condition or padded_correct_condition) eos_token_id = [873, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) unpadded_correct_condition = expectation == len(generated_tokens[0]) padded_correct_condition = expectation < len(generated_tokens[0]) and all( token == model.config.pad_token_id for token in generated_tokens[0][expectation:] ) self.assertTrue(unpadded_correct_condition or padded_correct_condition) def test_generate_vision2text_conditioning(self): model_cls = self.framework_dependent_parameters["AutoModelForVision2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] is_pt = not model_cls.__name__.startswith("TF") pixel_values = floats_tensor((2, 3, 30, 30)) conditioning_input = create_tensor_fn([[10], [10]]) # this should be the 2nd output token, after the BOS token model = model_cls.from_pretrained("hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2") if is_pt: pixel_values = pixel_values.to(torch_device) model = model.to(torch_device) conditioning_input = conditioning_input.to(torch_device) # we can condition on decoder_input_ids (expected decoder input) and input_ids (which we pipe internally as # decoder_input_ids, if the encoder is not a model with text input) output_sequences_decoder_input_ids = model.generate( pixel_values, max_length=5, decoder_input_ids=conditioning_input ) output_sequences_input_ids = model.generate(pixel_values, max_length=5, input_ids=conditioning_input) if is_pt: output_sequences_decoder_input_ids = output_sequences_decoder_input_ids.cpu().numpy() output_sequences_input_ids = output_sequences_input_ids.cpu().numpy() conditioning_input = conditioning_input.cpu().numpy() self.assertTrue(np.array_equal(output_sequences_decoder_input_ids, output_sequences_input_ids)) self.assertTrue(np.array_equal(output_sequences_decoder_input_ids[:, 1:2], conditioning_input))
transformers/tests/generation/test_framework_agnostic.py/0
{ "file_path": "transformers/tests/generation/test_framework_agnostic.py", "repo_id": "transformers", "token_count": 14177 }
161
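For readers skimming this dump, here is a standalone sketch of the invariant the beam-search transition-scores test above exercises: with length_penalty=0.0, summing the per-step scores returned by compute_transition_scores reproduces sequences_scores. The prompt below is illustrative; the checkpoint is the same google-t5/t5-small used in the test.

import numpy as np

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small")

inputs = tokenizer(["translate English to German: how are you?"], return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_length=10,
    num_beams=4,
    num_return_sequences=3,
    do_sample=False,
    length_penalty=0.0,
    return_dict_in_generate=True,
    output_scores=True,
)
transition_scores = model.compute_transition_scores(
    sequences=outputs.sequences, scores=outputs.scores, beam_indices=outputs.beam_indices
)
# Summing the per-step transition scores recovers the final beam scores
# (this only holds because length_penalty == 0).
assert np.allclose(transition_scores.sum(dim=-1).numpy(), outputs.sequences_scores.numpy())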
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class AlignProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = AlignProcessor def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) image_processor_map = { "do_resize": True, "size": 20, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) def get_tokenizer(self, **kwargs): return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_image_processor(self, **kwargs): return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer_slow = self.get_tokenizer() tokenizer_fast = self.get_rust_tokenizer() image_processor = self.get_image_processor() processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor) processor_slow.save_pretrained(self.tmpdirname) processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False) processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor) processor_fast.save_pretrained(self.tmpdirname) processor_fast = AlignProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer, BertTokenizer) self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string()) 
self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor) self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor) def test_save_load_pretrained_additional_features(self): processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = AlignProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, BertTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_proc = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str, padding="max_length", max_length=64) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), processor.model_input_names)
transformers/tests/models/align/test_processor_align.py/0
{ "file_path": "transformers/tests/models/align/test_processor_align.py", "repo_id": "transformers", "token_count": 3114 }
162
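A condensed sketch of the save/reload round trip the AlignProcessor tests above assert: everything the processor owns (tokenizer vocab, image-processor config) must survive save_pretrained/from_pretrained unchanged. The helper name is ours, not part of the test suite.

import tempfile

from transformers import AlignProcessor


def check_processor_roundtrip(processor: AlignProcessor) -> None:
    # Save to a scratch directory, reload, and compare both sub-components.
    with tempfile.TemporaryDirectory() as tmp_dir:
        processor.save_pretrained(tmp_dir)
        reloaded = AlignProcessor.from_pretrained(tmp_dir)
        assert reloaded.tokenizer.get_vocab() == processor.tokenizer.get_vocab()
        assert reloaded.image_processor.to_json_string() == processor.image_processor.to_json_string()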
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow


if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["google-bert/bert-base-cased", "google-bert/bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["FacebookAI/roberta-base", "FacebookAI/roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                # RobertaConfig no longer subclasses BertConfig, so check for the Roberta config class itself.
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["google-bert/bert-base-cased", "google-bert/bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["FacebookAI/roberta-base", "FacebookAI/roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
transformers/tests/models/auto/test_modeling_flax_auto.py/0
{ "file_path": "transformers/tests/models/auto/test_modeling_flax_auto.py", "repo_id": "transformers", "token_count": 1758 }
163
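The two jit tests above rely on a Flax pattern worth spelling out: wrap the forward pass in jax.jit, then block on the result, because JAX dispatches asynchronously and the call would otherwise return before compilation and execution actually finish. A minimal sketch, assuming jax and flax are installed; the checkpoint is the same one used in the test.

import jax

from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
model = FlaxAutoModel.from_pretrained("google-bert/bert-base-cased")
tokens = tokenizer("Do you support jax jitted function?", return_tensors="jax")


@jax.jit
def forward(**kwargs):
    return model(**kwargs)


# jax.block_until_ready walks the output pytree and waits on every array,
# so timing or asserting after this line sees the finished computation.
jax.block_until_ready(forward(**tokens))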
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class BertGenerationEncoderTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.use_labels = use_labels self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() return config, input_ids, input_mask, token_labels def get_config(self): return BertGenerationConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, token_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, 
input_ids, input_mask, token_labels, **kwargs, ): model = BertGenerationEncoder(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs, ): config.add_cross_attention = True model = BertGenerationEncoder(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs, ): config.is_decoder = True config.add_cross_attention = True model = BertGenerationDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_causal_lm( self, config, input_ids, input_mask, token_labels, *args, ): model = BertGenerationDecoder(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs() inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () 
all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def setUp(self): self.model_tester = BertGenerationEncoderTester(self) self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_bert(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() config.model_type = "bert" self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) @slow def test_model_from_pretrained(self): model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") self.assertIsNotNone(model) @require_torch class BertGenerationEncoderIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size([1, 8, 1024]) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) @require_torch class BertGenerationDecoderIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size([1, 8, 50358]) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/bert_generation/test_modeling_bert_generation.py/0
{ "file_path": "transformers/tests/models/bert_generation/test_modeling_bert_generation.py", "repo_id": "transformers", "token_count": 5719 }
164
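create_and_check_decoder_model_past_large_inputs above boils down to one property: feeding the whole sequence at once and feeding only the new tokens plus past_key_values must agree on the hidden states for the new positions. A stripped-down sketch of that check, assuming an unpadded decoder model with caching enabled (so attention masks can be omitted); the helper name and tolerances are illustrative.

import torch


def check_cache_equivalence(model, input_ids, next_tokens):
    # Prime the cache on the original prefix.
    past_key_values = model(input_ids, use_cache=True).past_key_values
    # Full forward over prefix + new tokens vs. incremental forward with the cache.
    full = model(torch.cat([input_ids, next_tokens], dim=-1), output_hidden_states=True)
    cached = model(next_tokens, past_key_values=past_key_values, output_hidden_states=True)
    num_new = next_tokens.shape[1]
    # The embeddings-layer states for the new positions must match either way.
    torch.testing.assert_close(
        full.hidden_states[0][:, -num_new:], cached.hidden_states[0], rtol=1e-3, atol=1e-3
    )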
# coding=utf-8 # Copyright 2020 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByT5Tokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = ByT5Tokenizer test_rust_tokenizer = False def setUp(self): super().setUp() tokenizer = ByT5Tokenizer() tokenizer.save_pretrained(self.tmpdirname) @cached_property def t5_base_tokenizer(self): return ByT5Tokenizer.from_pretrained("google/byt5-small") def get_tokenizer(self, **kwargs) -> ByT5Tokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. toks = [] for i in range(len(tokenizer)): try: tok = tokenizer.decode([i], clean_up_tokenization_spaces=False) except UnicodeDecodeError: pass toks.append((i, tok)) toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks)) toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks)) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks # toks_str = [t[1] for t in toks] toks_ids = [t[0] for t in toks] # Ensure consistency output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt output_ids = tokenizer.encode(output_txt, add_special_tokens=False) return output_txt, output_ids def test_eos_treatment(self): tokenizer = self.t5_base_tokenizer batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"]) batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""]) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"]) def test_multibytes_char(self): tokenizer = self.t5_base_tokenizer src_text = "Unicode €." 
encoded = tokenizer(src_text) encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "Unicode €.</s>") encoded = tokenizer("e è é ê ë") encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "e è é ê ë</s>") # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>") def test_prepare_batch_integration(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: skip batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 37), batch.input_ids.shape) self.assertEqual((2, 37), batch.attention_mask.shape) def test_empty_target_text(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length_integration(self): tokenizer = self.t5_base_tokenizer tgt_text = [ "Summary of the text.", "Another summary.", ] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) def test_eos_in_input(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization. </s>"] tgt_text = ["Summary of the text. 
</s>"] expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] # fmt: skip expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: skip batch = tokenizer(src_text, text_target=tgt_text) self.assertEqual(expected_src_tokens, batch["input_ids"][0]) self.assertEqual(expected_tgt_tokens, batch["labels"][0]) # cannot use default save_and_load_tokenizer test method because tokenizer has no vocab def test_save_and_load_tokenizer(self): # safety check on max_len default value so we are sure the test works tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00e9d,running" before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) shutil.rmtree(tmpdirname) tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00e9d,running" tokenizer.add_tokens(["bim", "bambam"]) additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") tokenizer.add_special_tokens( {"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False ) before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) # There is a conflict between the default value of extra_ids and adding a new special token through additional_special_tokens # We need to add the extra_ids in the list of the arg additional_special_tokens def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with 
open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_decode_single_bytes(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) tokenizer = tokenizer_class.from_pretrained(tmp_dir) self.assertTrue(tokenizer.decode([255]) == "") @unittest.skip(reason="ByT5Tokenizer does not have a vocabulary") def test_get_vocab(self): pass @unittest.skip(reason="inputs cannot be pretokenized as ids depend on whole input string") def test_pretokenized_inputs(self): pass @unittest.skip(reason="ByT5Tokenizer does not have a vocabulary") def test_conversion_reversible(self): pass def test_convert_tokens_to_string_format(self): # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"] string = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(string, str) # We need a different implementation of the test of the same name defined in TokenizerTesterMixin because this tokenizer # doesn't have a vocab def test_tokenizers_common_ids_setters(self): tokenizers = self.get_tokenizers() for tokenizer 
in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): attributes_list = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] token_id_to_test_setters = 0 token_to_test_setters = tokenizer.convert_ids_to_tokens( token_id_to_test_setters, skip_special_tokens=False ) for attr in attributes_list: setattr(tokenizer, attr + "_id", None) self.assertEqual(getattr(tokenizer, attr), None) self.assertEqual(getattr(tokenizer, attr + "_id"), None) setattr(tokenizer, attr + "_id", token_id_to_test_setters) self.assertEqual(getattr(tokenizer, attr), token_to_test_setters) self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters) setattr(tokenizer, "additional_special_tokens_ids", []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), []) setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
transformers/tests/models/byt5/test_tokenization_byt5.py/0
{ "file_path": "transformers/tests/models/byt5/test_tokenization_byt5.py", "repo_id": "transformers", "token_count": 7851 }
165
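The hard-coded id lists in the ByT5 tests above follow directly from the tokenizer's design: there is no learned vocabulary, each utf-8 byte maps to byte + 3 (ids 0, 1 and 2 are reserved for pad, eos and unk), and an eos id is appended. A short sketch reproducing the expectation from test_multibytes_char:

from transformers import ByT5Tokenizer

tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
text = "Unicode €."
# Shift every utf-8 byte by 3 and append </s> (id 1); multi-byte characters
# such as "€" simply become several consecutive ids.
manual_ids = [byte + 3 for byte in text.encode("utf-8")] + [tokenizer.eos_token_id]
assert tokenizer(text)["input_ids"] == manual_ids  # [88, 113, 108, ..., 229, 133, 175, 49, 1]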
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Clvp model.""" import tempfile import unittest import datasets import numpy as np from transformers import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig from transformers.testing_utils import ( cleanup, require_torch, slow, torch_device, ) from transformers.utils import is_torch_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ClvpEncoder, ClvpForCausalLM, ClvpModel, ClvpModelForConditionalGeneration from transformers import ClvpFeatureExtractor, ClvpTokenizer class ClvpEncoderTester: def __init__( self, parent, batch_size=2, seq_length=7, is_training=False, use_input_mask=True, use_labels=True, vocab_size=50, hidden_size=128, projection_dim=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=32, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 def get_config(self): encoder_config = ClvpEncoderConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, ) return encoder_config def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 encoder_config = self.get_config() return encoder_config, input_ids, input_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() speech_config, 
input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids.to(torch_device), "attention_mask": input_mask.to(torch_device)} return speech_config, inputs_dict def create_and_check_model(self, speech_config, input_ids, input_mask): text_config = ClvpEncoderConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) text_encoder_model = ClvpEncoder(config=text_config) text_encoder_model.to(torch_device) text_encoder_model.eval() with torch.no_grad(): result = text_encoder_model(input_ids, attention_mask=input_mask) result = text_encoder_model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim)) # now check with speech config speech_encoder_model = ClvpEncoder(config=speech_config) speech_encoder_model.to(torch_device) speech_encoder_model.eval() with torch.no_grad(): result = speech_encoder_model(input_ids, attention_mask=input_mask) result = speech_encoder_model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim)) @require_torch class ClvpEncoderTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ClvpEncoder,) if is_torch_available() else () test_pruning = False test_head_masking = False test_torchscript = False def setUp(self): self.model_tester = ClvpEncoderTester(self) self.encoder_config_tester = ConfigTester(self, config_class=ClvpEncoderConfig, hidden_size=32) def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device) def test_config(self): self.encoder_config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="ClvpEncoder does not output loss") def test_training(self): pass @unittest.skip(reason="ClvpEncoder does not output loss") def test_training_gradient_checkpointing(self): pass class ClvpDecoderTester: def __init__( self, parent, batch_size=2, seq_length=3, is_training=False, vocab_size=300, max_position_embeddings=256, max_text_tokens=256, use_input_mask=True, hidden_size=128, num_hidden_layers=2, num_attention_heads=2, bos_token_id=97, eos_token_id=98, relative_attention_num_buckets=4, relative_attention_max_distance=16, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.max_text_tokens = max_text_tokens self.use_input_mask = use_input_mask self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance def get_config(self): decoder_config = ClvpDecoderConfig( vocab_size=self.vocab_size, max_position_embeddings=self.max_position_embeddings, 
max_text_tokens=self.max_text_tokens, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, relative_attention_num_buckets=self.relative_attention_num_buckets, relative_attention_max_distance=self.relative_attention_max_distance, ) return decoder_config def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 decoder_config = self.get_config() return decoder_config, input_ids, input_mask def create_and_check_model(self, config, input_ids, attention_mask): model = ClvpForCausalLM(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids=input_ids, attention_mask=attention_mask) self.parent.assertEqual(result[0].shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = { "input_ids": input_ids.to(torch_device), "attention_mask": attention_mask.to(torch_device), } return config, inputs_dict @require_torch class ClvpDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ClvpModel, ClvpForCausalLM) if is_torch_available() else () all_generative_model_classes = (ClvpForCausalLM,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": ClvpModelForConditionalGeneration} if is_torch_available() else {} test_pruning = False def setUp(self): self.model_tester = ClvpDecoderTester(self) self.decoder_config_tester = ConfigTester(self, config_class=ClvpDecoderConfig, hidden_size=32) def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): if return_labels and model_class == ClvpForCausalLM: inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, self.model_tester.seq_length], device=torch_device ).long() return inputs_dict def test_training(self): # we will only test the ClvpForCausalLM since it outputs loss config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = ClvpForCausalLM(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): # we will only test the ClvpForCausalLM since it outputs loss config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = ClvpForCausalLM(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True) loss = model(**inputs).loss 
loss.backward() class ClvpModelForConditionalGenerationTester: def __init__(self, parent, is_training=False): self.parent = parent self.clvp_encoder_tester = ClvpEncoderTester(parent) self.is_training = is_training self.batch_size = self.clvp_encoder_tester.batch_size # need bs for batching_equivalence test def get_config(self): decoder_config = ClvpDecoderConfig( vocab_size=50, max_position_embeddings=30, max_text_tokens=30, hidden_size=128, num_hidden_layers=1, num_attention_heads=2, bos_token_id=97, eos_token_id=98, relative_attention_num_buckets=4, relative_attention_max_distance=16, ) text_config = self.clvp_encoder_tester.get_config() speech_config = self.clvp_encoder_tester.get_config() speech_config.vocab_size = 300 return ClvpConfig.from_sub_model_configs( text_config, speech_config, decoder_config, projection_dim=16, ) def prepare_config_and_inputs(self): _, input_ids, attention_mask = self.clvp_encoder_tester.prepare_config_and_inputs() ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values() feature_extractor = ClvpFeatureExtractor() input_features = feature_extractor(raw_speech=audio, sampling_rate=sr, return_tensors="pt")[ "input_features" ].to(torch_device) config = self.get_config() return config, input_ids, attention_mask, input_features def create_and_check_model(self, config, input_ids, attention_mask, input_features): model = ClvpModelForConditionalGeneration(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids=input_ids, input_features=input_features, attention_mask=attention_mask) self.parent.assertEqual(result.logits_per_speech.shape, (2, self.clvp_encoder_tester.batch_size)) self.parent.assertEqual(result.logits_per_text.shape, (self.clvp_encoder_tester.batch_size, 2)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, input_features = config_and_inputs inputs_dict = { "input_ids": input_ids.to(torch_device), "attention_mask": attention_mask.to(torch_device), "input_features": input_features.to(torch_device), "return_loss": False, } return config, inputs_dict @require_torch class ClvpModelForConditionalGenerationTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ClvpModelForConditionalGeneration,) if is_torch_available() else () test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = ClvpModelForConditionalGenerationTester(self) common_properties = ["projection_dim", "logit_scale_init_value"] self.clvp_config_tester = ConfigTester( self, config_class=ClvpConfig, has_text_modality=False, common_properties=common_properties, hidden_size=32 ) def test_config(self): self.clvp_config_tester.run_common_tests() def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # check for decoder model, 
text encoder model and speech encoder model hidden states decoder_hidden_states = outputs.decoder_hidden_states text_encoder_hidden_states = outputs.text_encoder_hidden_states speech_encoder_hidden_states = outputs.speech_encoder_hidden_states # check length of the hidden states expected_decoder_num_layers = config.decoder_config.num_hidden_layers + 1 self.assertEqual(len(decoder_hidden_states), expected_decoder_num_layers) expected_speech_encoder_num_layers = config.text_config.num_hidden_layers + 1 self.assertEqual(len(text_encoder_hidden_states), expected_speech_encoder_num_layers) expected_text_encoder_num_layers = config.speech_config.num_hidden_layers + 1 self.assertEqual(len(speech_encoder_hidden_states), expected_text_encoder_num_layers) # check shapes of each hidden state # for the decoder model we will only test the dimension because the ClvpConditioningEncoder could increase # the sequence lengths. self.assertEqual(decoder_hidden_states[0].shape[-1], config.decoder_config.hidden_size) # the testing for text encoder stays standard because we just pass the text tokens here. self.assertListEqual( list(text_encoder_hidden_states[0].shape[-2:]), [self.model_tester.clvp_encoder_tester.seq_length, config.text_config.hidden_size], ) # for the decoder model we will only test the dimension because the fix_decoder_outputs method could increase # the sequence lengths by adding `decoder_fixing_codes` tokens at the end. self.assertEqual(speech_encoder_hidden_states[0].shape[-1], config.speech_config.hidden_size) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings") def test_inputs_embeds(self): pass @unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings") def test_model_get_set_embeddings(self): pass # override as the `logit_scale` parameter initilization is different for Clvp def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": expected_value = np.log(1 / 0.07) returned_value = param.data.item() self.assertAlmostEqual( returned_value, expected_value, delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: expected_range = [0.0, 1.0] returned_range = ((param.data.mean() * 1e9).round() / 1e9).item() self.assertIn( returned_range, expected_range, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_load_speech_text_decoder_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save ClvpConfig and check if we can load ClvpEncoderConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: 
config.save_pretrained(tmp_dir_name) encoder_config = ClvpEncoderConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), encoder_config.to_dict()) # Save ClvpConfig and check if we can load ClvpDecoderConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) decoder_config = ClvpDecoderConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.decoder_config.to_dict(), decoder_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "susnato/clvp_dev" model = ClvpModelForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) # Since Clvp has a lot of different models connected with each other it's better to test each of them individually along # with a test_full_model_integration. If the model breaks in future, it could be of a great help to identify the broken part. @slow @require_torch class ClvpIntegrationTest(unittest.TestCase): def setUp(self): self.text = "This is an example text." ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) _, self.speech_samples, self.sr = ds.sort("id").select(range(1))[:1]["audio"][0].values() self.model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev").to(torch_device) self.model.eval() tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev") feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev") tokenizer_output = tokenizer(self.text, return_tensors="pt") self.text_tokens = tokenizer_output["input_ids"].to(torch_device) self.input_features = feature_extractor( raw_speech=self.speech_samples, sampling_rate=self.sr, return_tensors="pt" )["input_features"].to(torch_device) def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device, gc_collect=True) def test_conditional_encoder(self): with torch.no_grad(): conditioning_encoder_outputs = self.model.conditioning_encoder( input_features=self.input_features, input_ids=self.text_tokens ).to("cpu") self.assertEqual( conditioning_encoder_outputs.shape, torch.Size((self.input_features.shape[0], 18, self.model.config.decoder_config.hidden_size)), ) EXPECTED_OUTPUTS = torch.tensor( [[-0.8582, 0.5228, 1.9944], [-0.0465, -1.1017, -0.0093], [-0.0466, -0.6030, -0.1280]] ) torch.testing.assert_close(conditioning_encoder_outputs[0, :3, :3], EXPECTED_OUTPUTS, rtol=1e-4, atol=1e-4) def test_decoder_model_generate(self): autoregressive_model_output = self.model.speech_decoder_model.generate(input_ids=self.text_tokens).cpu() EXPECTED_OUTPUTS = torch.tensor([[147, 2, 54, 2, 43, 2, 169, 122, 29, 64, 2, 136, 37, 33, 9, 8193]]) torch.testing.assert_close(autoregressive_model_output, EXPECTED_OUTPUTS) def test_text_and_speech_encoder_models(self): # check for text embeds text_embeds = self.model.text_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu() # fmt: off EXPECTED_TEXT_EMBEDS = torch.tensor([1.4798, -2.0005, 2.3902, -0.5042, 1.6401, -2.4135, -1.4800, 3.0118, -2.4422, 1.3266, 2.2339, 1.4761, -4.8983, -1.3592, 6.0251, 6.7364, 2.2576, 3.7229, -10.0436, 4.6676]) # fmt: on torch.testing.assert_close(text_embeds[0, :20], EXPECTED_TEXT_EMBEDS, rtol=1e-4, atol=1e-4) # check for speech embeds speech_embeds = self.model.speech_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu() # fmt: off EXPECTED_SPEECH_EMBEDS = torch.tensor([3.1202, -3.1183, -1.4264, 
-6.1339, 1.8885, -0.1983, 0.9461, -1.7414, 0.3320, -3.8400, -1.5715, 1.5096, -1.7576, 0.2387, 4.9758, 5.8450, -6.2534, 2.8587, -5.5816, 4.7821]) # fmt: on torch.testing.assert_close(speech_embeds[0, :20], EXPECTED_SPEECH_EMBEDS, rtol=1e-4, atol=1e-4) def test_full_model_integration(self): full_model_output = self.model.generate( input_ids=self.text_tokens, input_features=self.input_features, do_sample=False, num_beams=4, num_return_sequences=4, max_new_tokens=10, ) EXPECTED_SPEECH_IDS = torch.tensor([[1953, 1080, 612], [1953, 612, 493], [1953, 612, 716]]) EXPECTED_SIMILARITY_SCORES = torch.tensor([[14.7660, 14.4569, 13.6472, 13.5683]]) torch.testing.assert_close(full_model_output.speech_ids.cpu()[-3:, -3:], EXPECTED_SPEECH_IDS) torch.testing.assert_close(full_model_output.logits_per_text.cpu(), EXPECTED_SIMILARITY_SCORES)
transformers/tests/models/clvp/test_modeling_clvp.py/0
{ "file_path": "transformers/tests/models/clvp/test_modeling_clvp.py", "repo_id": "transformers", "token_count": 11731 }
166
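
# A minimal, self-contained sketch (not the CLVP implementation; the embedding shapes and the
# normalization step are assumptions) of why the `test_initialization` override above expects
# `logit_scale` to start at np.log(1 / 0.07): CLIP-style contrastive heads keep the learnable
# temperature in log space and exponentiate it before scaling the similarity matrix.
import numpy as np
import torch

text_embeds = torch.nn.functional.normalize(torch.randn(2, 16), dim=-1)    # (batch, projection_dim)
speech_embeds = torch.nn.functional.normalize(torch.randn(2, 16), dim=-1)  # (batch, projection_dim)
logit_scale = torch.nn.Parameter(torch.tensor(np.log(1 / 0.07), dtype=torch.float32))

# exp(log(1 / 0.07)) == 1 / 0.07 ≈ 14.29; the test above asserts the stored log-space value
logits_per_text = logit_scale.exp() * text_embeds @ speech_embeds.t()  # (batch, batch)
logits_per_speech = logits_per_text.t()
print(logits_per_text.shape, logits_per_speech.shape, float(logit_scale.exp()))
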
# coding=utf-8 # Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch CPMAnt model.""" import unittest from transformers.testing_utils import is_torch_available, require_torch, tooslow from ...generation.test_utils import torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CpmAntConfig, CpmAntForCausalLM, CpmAntModel, CpmAntTokenizer, ) @require_torch class CpmAntModelTester: def __init__( self, parent, batch_size=2, seq_length=8, is_training=True, use_token_type_ids=False, use_input_mask=False, use_labels=False, use_mc_token_ids=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, num_buckets=32, max_distance=128, prompt_length=8, prompt_types=8, segment_types=8, init_std=0.02, return_dict=True, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.num_buckets = num_buckets self.max_distance = max_distance self.prompt_length = prompt_length self.prompt_types = prompt_types self.segment_types = segment_types self.init_std = init_std self.return_dict = return_dict def prepare_config_and_inputs(self): input_ids = {} input_ids["input_ids"] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).type(torch.int32) input_ids["use_cache"] = False config = self.get_config() return (config, input_ids) def get_config(self): return CpmAntConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, dim_ff=self.intermediate_size, position_bias_num_buckets=self.num_buckets, position_bias_max_distance=self.max_distance, prompt_types=self.prompt_types, prompt_length=self.prompt_length, segment_types=self.segment_types, use_cache=True, init_std=self.init_std, return_dict=self.return_dict, ) def create_and_check_cpmant_model(self, config, input_ids, *args): model = CpmAntModel(config=config) model.to(torch_device) model.eval() hidden_states = model(**input_ids).last_hidden_state self.parent.assertEqual(hidden_states.shape, (self.batch_size, self.seq_length, config.hidden_size)) def create_and_check_lm_head_model(self, config, input_ids, *args): model = CpmAntForCausalLM(config) model.to(torch_device) input_ids["input_ids"] = input_ids["input_ids"].to(torch_device) model.eval() model_output = model(**input_ids) self.parent.assertEqual( model_output.logits.shape, 
(self.batch_size, self.seq_length, config.vocab_size + config.prompt_types * config.prompt_length),
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict


@require_torch
class CpmAntModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CpmAntModel, CpmAntForCausalLM) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CpmAntModel, "text-generation": CpmAntForCausalLM} if is_torch_available() else {}
    )

    test_pruning = False
    test_missing_keys = False
    test_mismatched_shapes = False
    test_head_masking = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = CpmAntModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CpmAntConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="CPMAnt doesn't support input_embeds.")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(
        reason="CPMAnt doesn't support retaining gradients on hidden_states or attentions, because prompt management "
        "peels output.hidden_states (and likewise attentions) off the graph. We strongly recommend using the loss to "
        "tune the model."
    )
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def test_cpmant_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_cpmant_model(config, inputs)

    def test_cpmant_lm_head_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(config, inputs)


@require_torch
class CpmAntModelIntegrationTest(unittest.TestCase):
    @tooslow
    def test_inference_masked_lm(self):
        texts = "今天天气真好!"
        model_path = "openbmb/cpm-ant-10b"
        model = CpmAntModel.from_pretrained(model_path)
        tokenizer = CpmAntTokenizer.from_pretrained(model_path)
        inputs = tokenizer(texts, return_tensors="pt")
        hidden_states = model(**inputs).last_hidden_state

        expected_slice = torch.tensor(
            [[[6.1708, 5.9244, 1.0835], [6.5207, 6.2893, -11.3324], [-1.0107, -0.0576, -5.9577]]],
        )
        torch.testing.assert_close(hidden_states[:, :3, :3], expected_slice, rtol=1e-2, atol=1e-2)


@require_torch
class CpmAntForCausalLMIntegrationTest(unittest.TestCase):
    @tooslow
    def test_inference_causal(self):
        texts = "今天天气真好!"
model_path = "openbmb/cpm-ant-10b" model = CpmAntForCausalLM.from_pretrained(model_path) tokenizer = CpmAntTokenizer.from_pretrained(model_path) inputs = tokenizer(texts, return_tensors="pt") hidden_states = model(**inputs).logits expected_slice = torch.tensor( [[[-6.4267, -6.4083, -6.3958], [-5.8802, -5.9447, -5.7811], [-5.3896, -5.4820, -5.4295]]], ) torch.testing.assert_close(hidden_states[:, :3, :3], expected_slice, rtol=1e-2, atol=1e-2) @tooslow def test_simple_generation(self): model_path = "openbmb/cpm-ant-10b" model = CpmAntForCausalLM.from_pretrained(model_path) tokenizer = CpmAntTokenizer.from_pretrained(model_path) texts = "今天天气不错," expected_output = "今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的" model_inputs = tokenizer(texts, return_tensors="pt") token_ids = model.generate(**model_inputs) output_texts = tokenizer.batch_decode(token_ids) self.assertEqual(expected_output, output_texts) @tooslow def test_batch_generation(self): model_path = "openbmb/cpm-ant-10b" model = CpmAntForCausalLM.from_pretrained(model_path) tokenizer = CpmAntTokenizer.from_pretrained(model_path) texts = ["今天天气不错,", "新年快乐,万事如意!"] expected_output = [ "今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的", "新年快乐,万事如意!在这辞旧迎新的美好时刻,我谨代表《农村新技术》杂志社全体同仁,向一直以来关心、支持《农村新技术》杂志发展的各级领导、各界朋友和广大读者致以最诚挚的", ] model_inputs = tokenizer(texts, return_tensors="pt", padding=True) token_ids = model.generate(**model_inputs) output_texts = tokenizer.batch_decode(token_ids) self.assertEqual(expected_output, output_texts)
transformers/tests/models/cpmant/test_modeling_cpmant.py/0
{ "file_path": "transformers/tests/models/cpmant/test_modeling_cpmant.py", "repo_id": "transformers", "token_count": 4326 }
167
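
# Back-of-the-envelope check of the logits shape asserted in `create_and_check_lm_head_model`
# above: the asserted last dimension is vocab_size + prompt_types * prompt_length (the vocabulary
# extended with the prompt tokens). Numbers mirror the tester defaults above; this snippet is only
# an illustration, not part of the test suite.
vocab_size, prompt_types, prompt_length = 99, 8, 8
batch_size, seq_length = 2, 8

extended_vocab_size = vocab_size + prompt_types * prompt_length  # 99 + 8 * 8 = 163
expected_logits_shape = (batch_size, seq_length, extended_vocab_size)
print(expected_logits_shape)  # (2, 8, 163)
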
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the TensorFlow Data2VecVision model.""" from __future__ import annotations import collections.abc import inspect import unittest import numpy as np from transformers import Data2VecVisionConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation, TFData2VecVisionModel, ) from transformers.modeling_tf_utils import keras if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class TFData2VecVisionModelTester: def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ): self.parent = parent self.vocab_size = 100 self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_indices = out_indices self.num_labels = num_labels def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return Data2VecVisionConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = TFData2VecVisionModel(config=config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (
            self.image_size
            if isinstance(self.image_size, collections.abc.Iterable)
            else (self.image_size, self.image_size)
        )
        patch_size = (
            self.patch_size
            if isinstance(self.patch_size, collections.abc.Iterable)
            else (self.patch_size, self.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = TFData2VecVisionForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = TFData2VecVisionForSemanticSegmentation(config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_for_keras_fit(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, _, _ = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": tf.zeros((self.batch_size))}
        return config, inputs_dict


@require_tf
class TFData2VecVisionModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Data2VecVision does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
""" all_model_classes = ( (TFData2VecVisionModel, TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFData2VecVisionModel, "image-classification": TFData2VecVisionForImageClassification} if is_tf_available() else {} ) test_pruning = False test_onnx = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = TFData2VecVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Data2VecVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Data2VecVision does not use inputs_embeds") def test_inputs_embeds(self): # Data2VecVision does not use inputs_embeds pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # in Data2VecVision, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] 
config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Data2VecVision has a different seq_length image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # Overriding this method since the base method won't be compatible with Data2VecVision. @slow def test_keras_fit(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Since `TFData2VecVisionModel` cannot operate with the default `fit()` method. 
if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): # Test that model correctly compute the loss with kwargs _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() label_names = {"labels"} self.assertGreater(len(label_names), 0, msg="No matching label names found!") labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = { key: val for key, val in prepared_for_class.items() if key not in label_names } self.assertGreater(len(inputs_minus_labels), 0) model.compile(optimizer=keras.optimizers.SGD(0.0), run_eagerly=True) # Make sure the model fits without crashing regardless of where we pass the labels history1 = model.fit( prepared_for_class, validation_data=prepared_for_class, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss1 = history1.history["val_loss"][0] history2 = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss2 = history2.history["val_loss"][0] self.assertTrue(np.allclose(val_loss1, val_loss2, atol=1e-2, rtol=1e-3)) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None): # We override with a slightly higher tol value, as semseg models tend to diverge a bit more super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) # Overriding this method since the base method won't be compatible with Data2VecVision. def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Since `TFData2VecVisionModel` won't have labels against which we # could compute loss. 
if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): # The number of elements in the loss should be the same as the number of elements in the label _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() added_label = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0] ] loss_size = tf.size(added_label) # Test that model correctly compute the loss with kwargs possible_input_names = {"input_ids", "pixel_values", "input_features"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) loss = model(model_input, **prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) # Test that model correctly compute the loss with a dict _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() loss = model(**prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) # Test that model correctly compute the loss with a tuple label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) # Send to model loss = model(tuple_input[:-1])[0] self.assertEqual(loss.shape, [loss_size]) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/data2vec-vision-base-ft1k" model = TFData2VecVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFData2VecVisionModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k") if is_vision_available() else None ) @slow def test_inference_image_classification_head_imagenet_1k(self): model = TFData2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = tf.convert_to_tensor([1, 1000]) self.assertEqual(logits.shape, expected_shape) expected_slice = tf.convert_to_tensor([0.3277, -0.1395, 0.0911]) tf.debugging.assert_near(logits[0, :3], expected_slice, atol=1e-4) expected_top2 = [model.config.label2id[i] for i in ["remote control, remote", "tabby, tabby cat"]] self.assertEqual(tf.nn.top_k(outputs.logits[0], 2).indices.numpy().tolist(), expected_top2)
transformers/tests/models/data2vec/test_modeling_tf_data2vec_vision.py/0
{ "file_path": "transformers/tests/models/data2vec/test_modeling_tf_data2vec_vision.py", "repo_id": "transformers", "token_count": 9900 }
168
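
# Worked example of the sequence-length arithmetic the Data2VecVision tests above rely on:
# the encoder sees one token per non-overlapping patch plus one [CLS] token. The numbers
# mirror the tester defaults (image_size=30, patch_size=2) and are only an illustration.
image_size = (30, 30)
patch_size = (2, 2)

num_patches = (image_size[0] // patch_size[0]) * (image_size[1] // patch_size[1])  # 15 * 15 = 225
seq_length = num_patches + 1  # + 1 for the [CLS] token
print(num_patches, seq_length)  # 225 226
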
# coding=utf-8 # Copyright 2020 Huggingface # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from transformers import ( DPRContextEncoderTokenizer, DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast, DPRReaderOutput, DPRReaderTokenizer, DPRReaderTokenizerFast, ) from transformers.testing_utils import require_tokenizers, slow from transformers.tokenization_utils_base import BatchEncoding from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class DPRContextEncoderTokenizationTest(BertTokenizationTest): tokenizer_class = DPRContextEncoderTokenizer rust_tokenizer_class = DPRContextEncoderTokenizerFast test_rust_tokenizer = True from_pretrained_id = "facebook/dpr-ctx_encoder-single-nq-base" @require_tokenizers class DPRQuestionEncoderTokenizationTest(BertTokenizationTest): tokenizer_class = DPRQuestionEncoderTokenizer rust_tokenizer_class = DPRQuestionEncoderTokenizerFast test_rust_tokenizer = True from_pretrained_id = "facebook/dpr-ctx_encoder-single-nq-base" @require_tokenizers class DPRReaderTokenizationTest(BertTokenizationTest): tokenizer_class = DPRReaderTokenizer rust_tokenizer_class = DPRReaderTokenizerFast test_rust_tokenizer = True from_pretrained_id = "facebook/dpr-ctx_encoder-single-nq-base" @slow def test_decode_best_spans(self): tokenizer = self.tokenizer_class.from_pretrained("google-bert/bert-base-uncased") text_1 = tokenizer.encode("question sequence", add_special_tokens=False) text_2 = tokenizer.encode("title sequence", add_special_tokens=False) text_3 = tokenizer.encode("text sequence " * 4, add_special_tokens=False) input_ids = [[101] + text_1 + [102] + text_2 + [102] + text_3] reader_input = BatchEncoding({"input_ids": input_ids}) start_logits = [[0] * len(input_ids[0])] end_logits = [[0] * len(input_ids[0])] relevance_logits = [0] reader_output = DPRReaderOutput(start_logits, end_logits, relevance_logits) start_index, end_index = 8, 9 start_logits[0][start_index] = 10 end_logits[0][end_index] = 10 predicted_spans = tokenizer.decode_best_spans(reader_input, reader_output) self.assertEqual(predicted_spans[0].start_index, start_index) self.assertEqual(predicted_spans[0].end_index, end_index) self.assertEqual(predicted_spans[0].doc_id, 0) @slow def test_call(self): tokenizer = self.tokenizer_class.from_pretrained("google-bert/bert-base-uncased") text_1 = tokenizer.encode("question sequence", add_special_tokens=False) text_2 = tokenizer.encode("title sequence", add_special_tokens=False) text_3 = tokenizer.encode("text sequence", add_special_tokens=False) expected_input_ids = [101] + text_1 + [102] + text_2 + [102] + text_3 encoded_input = tokenizer(questions=["question sequence"], titles=["title sequence"], texts=["text sequence"]) self.assertIn("input_ids", encoded_input) self.assertIn("attention_mask", encoded_input) self.assertListEqual(encoded_input["input_ids"][0], expected_input_ids)
transformers/tests/models/dpr/test_tokenization_dpr.py/0
{ "file_path": "transformers/tests/models/dpr/test_tokenization_dpr.py", "repo_id": "transformers", "token_count": 1358 }
169
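
# A minimal sketch of the span-selection idea exercised by `test_decode_best_spans` above: the
# best answer span maximizes start_logit + end_logit with start <= end. This is not the DPR
# implementation (which also ranks passages and caps the span length); the logits are made up
# to mirror the indices the test boosts.
start_logits = [0.0] * 16
end_logits = [0.0] * 16
start_logits[8], end_logits[9] = 10.0, 10.0

best_span, best_score = None, float("-inf")
for start, start_logit in enumerate(start_logits):
    for end in range(start, len(end_logits)):
        score = start_logit + end_logits[end]
        if score > best_score:
            best_span, best_score = (start, end), score

print(best_span)  # (8, 9), matching start_index=8 and end_index=9 in the test
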
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the emu3 processor."""

import tempfile
import unittest

import numpy as np

from transformers import Emu3Processor, GPT2TokenizerFast
from transformers.utils import is_vision_available

from ...test_processing_common import ProcessorTesterMixin


if is_vision_available():
    from transformers import Emu3ImageProcessor


class Emu3ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    processor_class = Emu3Processor

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = Emu3ImageProcessor()
        extra_special_tokens = {
            "image_token": "<image>",
            "boi_token": "<|image start|>",
            "eoi_token": "<|image end|>",
            "image_wrapper_token": "<|image token|>",
            "eof_token": "<|extra_201|>",
        }
        tokenizer = GPT2TokenizerFast.from_pretrained(
            "openai-community/gpt2", extra_special_tokens=extra_special_tokens
        )
        tokenizer.pad_token_id = 0
        tokenizer.sep_token_id = 1
        processor = self.processor_class(
            image_processor=image_processor, tokenizer=tokenizer, chat_template="dummy_template"
        )
        processor.save_pretrained(self.tmpdirname)

    def test_processor_for_generation(self):
        processor_components = self.prepare_components()
        processor = self.processor_class(**processor_components)

        # we don't need an image as input because the model will generate one;
        # the image prepared here is only used below to check that passing one raises an error
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, return_for_image_generation=True, return_tensors="pt")
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "image_sizes"])
        self.assertEqual(inputs[self.text_input_name].shape[-1], 8)

        # when `return_for_image_generation` is set, we raise an error that image should not be provided
        with self.assertRaises(ValueError):
            inputs = processor(
                text=input_str, images=image_input, return_for_image_generation=True, return_tensors="pt"
            )

    def test_processor_postprocess(self):
        processor_components = self.prepare_components()
        processor = self.processor_class(**processor_components)

        input_str = "lower newer"
        orig_image_input = self.prepare_image_inputs()
        orig_image = np.array(orig_image_input).transpose(2, 0, 1)

        inputs = processor(text=input_str, images=orig_image, do_resize=False, return_tensors="np")
        normalized_image_input = inputs.pixel_values
        unnormalized_images = processor.postprocess(normalized_image_input, return_tensors="np")["pixel_values"]

        # For an image where pixels go from 0 to 255 the diff can be 1 due to some numerical precision errors when scaling and unscaling
        self.assertTrue(np.abs(orig_image - unnormalized_images).max() >= 1)
transformers/tests/models/emu3/test_processor_emu3.py/0
{ "file_path": "transformers/tests/models/emu3/test_processor_emu3.py", "repo_id": "transformers", "token_count": 1341 }
170
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Falcon model.""" import unittest from parameterized import parameterized from transformers import ( AutoModelForCausalLM, AutoTokenizer, FalconConfig, is_torch_available, set_seed, ) from transformers.testing_utils import ( require_bitsandbytes, require_torch, require_torch_sdpa, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) from transformers.models.falcon.modeling_falcon import ( FalconRotaryEmbedding, ) class FalconModelTester: def __init__( self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return FalconConfig( vocab_size=self.vocab_size, 
hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = FalconModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = FalconModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = FalconForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = FalconForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, 
-3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": FalconModel, "question-answering": FalconForQuestionAnswering, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True def setUp(self): self.model_tester = FalconModelTester(self) self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_position_embedding_types(self): config, *inputs = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: config.alibi = alibi self.model_tester.create_and_check_model(config, *inputs) def test_falcon_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_falcon_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) 
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_falcon_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_past_key_values_format(self): # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(config, "use_cache"): self.skipTest(reason="Model does not support cache") model = model_class(config).to(torch_device) if "use_cache" not in inputs: inputs["use_cache"] = True outputs = model(**inputs) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: self.skipTest(reason="Model does not return past_key_values") num_hidden_layers = ( getattr(config, "decoder_layers", None) or getattr(config, "num_decoder_layers", None) or config.num_hidden_layers ) num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads) embed_dim = getattr(config, "d_model", config.hidden_size) per_head_embed_dim = embed_dim // num_attention_heads past_kv = outputs["past_key_values"] self.assertEqual(len(past_kv), num_hidden_layers) batch_size, seq_length = inputs["input_ids"].shape for i in range(num_hidden_layers): if config.new_decoder_architecture: num_attention_heads = config.num_attention_heads elif config.multi_query: num_attention_heads = 1 self.assertEqual(len(past_kv[0]), 2) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @parameterized.expand([("linear",), ("dynamic",)]) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model_rope_scaling_from_config with Llama->Falcon def test_model_rope_scaling_from_config(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = FalconModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = FalconModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = 
scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) # Copied from tests.models.gpt_neox.test_modeling_gpt_neox.GPTNeoXModelTest.test_model_rope_scaling with GPTNeoX->Falcon def test_model_rope_scaling(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() scaling_factor = 10 short_input_length = 10 long_input_length = int(config.max_position_embeddings * 1.5) # Inputs x = torch.randn(1, dtype=torch.float32, device=torch_device) # used exlusively to get the dtype and the device position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device) position_ids_short = position_ids_short.unsqueeze(0) position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device) position_ids_long = position_ids_long.unsqueeze(0) # Sanity check original RoPE original_rope = FalconRotaryEmbedding(config).to(torch_device) original_cos_short, original_sin_short = original_rope(x, position_ids_short) original_cos_long, original_sin_long = original_rope(x, position_ids_long) torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :]) torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :]) # Sanity check linear RoPE scaling # New position "x" should match original position with index "x/scaling_factor" config.rope_scaling = {"type": "linear", "factor": scaling_factor} linear_scaling_rope = FalconRotaryEmbedding(config).to(torch_device) linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short) linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long) torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :]) torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :]) for new_position in range(0, long_input_length, scaling_factor): original_position = int(new_position // scaling_factor) torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :]) torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :]) # Sanity check Dynamic NTK RoPE scaling # Scaling should only be observed after a long input is fed. 
We can observe that the frequencies increase # with scaling_factor (or that `inv_freq` decreases) config.rope_scaling = {"type": "dynamic", "factor": scaling_factor} ntk_scaling_rope = FalconRotaryEmbedding(config).to(torch_device) ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short) ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long) torch.testing.assert_close(ntk_cos_short, original_cos_short) torch.testing.assert_close(ntk_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_sin_long, original_sin_long) self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all()) @require_torch class FalconLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_falcon(self): tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b") model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b") model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) EXPECTED_OUTPUT = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." ) output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19) output_str = tokenizer.batch_decode(output_ids)[0] self.assertEqual(output_str, EXPECTED_OUTPUT) @slow @require_bitsandbytes def test_lm_generate_falcon_11b(self): tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-11B", padding_side="left") model = FalconForCausalLM.from_pretrained( "tiiuae/falcon-11B", device_map={"": torch_device}, load_in_8bit=True ) model.eval() inputs = tokenizer( "Two roads diverged in a yellow wood,", return_tensors="pt", return_token_type_ids=False ).to(torch_device) EXPECTED_OUTPUT = "Two roads diverged in a yellow wood,\nAnd sorry I could not travel both\n" output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=9) output_str = tokenizer.batch_decode(output_ids)[0] self.assertEqual(output_str, EXPECTED_OUTPUT) @slow def test_lm_generation_big_models(self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**inputs, do_sample=False, max_new_tokens=4) model.generate(**inputs, do_sample=True, max_new_tokens=4) model.generate(**inputs, num_beams=2, max_new_tokens=4) @slow def test_lm_generation_use_cache(self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(device=torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) # Test results are the same with and without cache outputs_no_cache = 
model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False) outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0) @require_bitsandbytes @slow def test_batched_generation(self): tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained( "tiiuae/falcon-7b", device_map={"": torch_device}, load_in_4bit=True, ) test_text = "A sequence: 1, 2" # should generate the rest of the sequence unpadded_inputs = tokenizer([test_text], return_tensors="pt").to("cuda:0") unpadded_gen_out = model.generate(**unpadded_inputs, max_new_tokens=20) unpadded_gen_text = tokenizer.batch_decode(unpadded_gen_out, skip_special_tokens=True) dummy_text = "This is a longer text " * 2 # forces left-padding on `test_text` padded_inputs = tokenizer([test_text, dummy_text], return_tensors="pt", padding=True).to("cuda:0") padded_gen_out = model.generate(**padded_inputs, max_new_tokens=20) padded_gen_text = tokenizer.batch_decode(padded_gen_out, skip_special_tokens=True) expected_output = "A sequence: 1, 2, 3, 4, 5, 6, 7, 8, " self.assertLess(unpadded_inputs.input_ids.shape[-1], padded_inputs.input_ids.shape[-1]) # left-padding exists self.assertEqual(unpadded_gen_text[0], expected_output) self.assertEqual(padded_gen_text[0], expected_output) @slow @require_torch_sdpa def test_falcon_alibi_sdpa_matches_eager(self): input_ids = torch.randint(0, 1000, (5, 20)) config = FalconConfig( vocab_size=1000, hidden_size=64, num_hidden_layers=3, num_attention_heads=4, new_decoder_architecture=True, alibi=True, ) falcon = FalconForCausalLM(config) falcon = falcon.eval() with torch.no_grad(): # output_attentions=True dispatches to eager path falcon_output_eager = falcon(input_ids, output_attentions=True)[0] falcon_output_sdpa = falcon(input_ids)[0] torch.testing.assert_close(falcon_output_eager, falcon_output_sdpa, rtol=1e-3, atol=1e-3)
transformers/tests/models/falcon/test_modeling_falcon.py/0
{ "file_path": "transformers/tests/models/falcon/test_modeling_falcon.py", "repo_id": "transformers", "token_count": 12068 }
171
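The Falcon tests above verify two RoPE scaling behaviours: linear scaling, where position x reuses the rotation angle of original position x / factor, and dynamic NTK scaling, which leaves the embeddings untouched for short inputs and lowers every inverse frequency once the input exceeds the original maximum length. The following is a minimal, self-contained sketch of those two rules, not the library implementation; the helper names are illustrative and the dynamic branch assumes the commonly used NTK-aware base rescaling.

import torch

def rope_inv_freq(head_dim, base=10000.0):
    # Standard RoPE inverse frequencies, one per pair of channels.
    return 1.0 / (base ** (torch.arange(0, head_dim, 2, dtype=torch.float32) / head_dim))

def linear_scaled_angles(positions, head_dim, factor):
    # Linear scaling: position x is rotated by the angle of original position x / factor.
    return torch.outer(positions.float() / factor, rope_inv_freq(head_dim))

def dynamic_ntk_inv_freq(seq_len, head_dim, max_pos, factor, base=10000.0):
    # Dynamic NTK scaling: nothing changes until seq_len exceeds the original maximum;
    # past that point the base grows, which lowers every inverse frequency.
    if seq_len <= max_pos:
        return rope_inv_freq(head_dim, base)
    new_base = base * ((factor * seq_len / max_pos) - (factor - 1)) ** (head_dim / (head_dim - 2))
    return rope_inv_freq(head_dim, new_base)

head_dim, max_pos, factor = 64, 128, 10
short_len, long_len = 10, int(max_pos * 1.5)

# Short input: dynamic scaling leaves the frequencies untouched (cf. the "dynamic" branch above).
assert torch.equal(dynamic_ntk_inv_freq(short_len, head_dim, max_pos, factor), rope_inv_freq(head_dim))

# Long input: every inverse frequency shrinks, matching the
# `ntk_scaling_rope.inv_freq <= original_rope.inv_freq` assertion in the test.
assert (dynamic_ntk_inv_freq(long_len, head_dim, max_pos, factor) <= rope_inv_freq(head_dim)).all()

# Linear scaling: the angle at position `factor` equals the unscaled angle at position 1.
positions = torch.arange(long_len)
scaled = linear_scaled_angles(positions, head_dim, factor)
unscaled = torch.outer(positions.float(), rope_inv_freq(head_dim))
assert torch.allclose(scaled[factor], unscaled[1])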
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Gemma model.""" import tempfile import unittest import pytest from packaging import version from transformers import AutoModelForCausalLM, AutoTokenizer, GemmaConfig, is_torch_available from transformers.generation.configuration_utils import GenerationConfig from transformers.testing_utils import ( is_flaky, require_bitsandbytes, require_flash_attn, require_read_token, require_torch, require_torch_accelerator, require_torch_gpu, require_torch_sdpa, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GemmaForCausalLM, GemmaForSequenceClassification, GemmaForTokenClassification, GemmaModel, ) @require_torch class GemmaModelTester: config_class = GemmaConfig if is_torch_available(): model_class = GemmaModel for_causal_lm_class = GemmaForCausalLM for_sequence_class = GemmaForSequenceClassification for_token_class = GemmaForTokenClassification def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope self.head_dim = self.hidden_size // self.num_attention_heads # Copied from tests.models.mistral.test_modeling_mistral.MistralModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = 
torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return self.config_class( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, head_dim=self.head_dim, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = self.model_class(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = self.model_class(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = self.for_causal_lm_class(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = self.for_causal_lm_class(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), 
vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs_for_common with Llama->Gemma def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class GemmaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (GemmaModel, GemmaForCausalLM, GemmaForSequenceClassification, GemmaForTokenClassification) if is_torch_available() else () ) all_generative_model_classes = (GemmaForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GemmaModel, "text-classification": GemmaForSequenceClassification, "token-classification": GemmaForTokenClassification, "text-generation": GemmaForCausalLM, "zero-shot": GemmaForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False # Need to remove 0.9 in `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.6] # used in `test_torch_compile_for_training` _torch_compile_train_cls = GemmaForCausalLM if is_torch_available() else None # TODO (ydshieh): Check this. 
See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True def setUp(self): self.model_tester = GemmaModelTester(self) self.config_tester = ConfigTester(self, config_class=GemmaConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_Gemma_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() print(config) config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = self.model_tester.for_sequence_class(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Gemma_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = self.model_tester.for_sequence_class(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Gemma_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = self.model_tester.for_sequence_class(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Gemma_token_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels) model = self.model_tester.for_token_class(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=token_labels) self.assertEqual( result.logits.shape, 
(self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels), ) @unittest.skip(reason="Gemma buffers include complex numbers, which breaks this test") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="Gemma uses GQA on all models so the KV cache is a non standard format") def test_past_key_values_format(self): pass @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence_right_padding(self): self.skipTest(reason="Gemma flash attention does not support right padding") @require_torch_sdpa @require_torch_accelerator @slow def test_sdpa_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_sdpa: self.skipTest(reason="Model does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="sdpa" ) model_sdpa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, attn_implementation="eager") model.to(torch_device) dummy_input = inputs_dict[model_class.main_input_name] dummy_input = dummy_input.to(torch_device) outputs = model(dummy_input, output_hidden_states=True) outputs_sdpa = model_sdpa(dummy_input, output_hidden_states=True) logits = outputs.hidden_states[-1] logits_sdpa = outputs_sdpa.hidden_states[-1] # gemma sdpa needs a high tolerance assert torch.allclose(logits_sdpa, logits, atol=3e-3) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @is_flaky() @slow def test_flash_attn_2_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(reason="Model does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, attn_implementation="eager") model.to(torch_device) dummy_input = inputs_dict[model_class.main_input_name] dummy_input = dummy_input.to(torch_device) outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] # gemma flash attention 2 needs a high tolerance assert torch.allclose(logits_fa, logits, atol=3e-3) @slow @require_torch_accelerator class GemmaIntegrationTest(unittest.TestCase): input_text = ["Hello I am doing", "Hi today"] # This variable is used to determine which CUDA device are we using for our runners (A10 or T4) # Depending on the hardware we get different logits / generations cuda_compute_capability_major_version = None @classmethod def setUpClass(cls): if is_torch_available() and torch.cuda.is_available(): # 8 is for A100 / A10 and 7 for T4 cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0] @require_read_token def test_model_2b_fp16(self): model_id = "google/gemma-2b" EXPECTED_TEXTS = [ "Hello I am doing a project on the 1990s and I need to know what the most popular music", "Hi today I am going to share with you a 
very easy and simple recipe of <strong><em>Kaju Kat", ] model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, torch_dtype=torch.float16).to( torch_device ) model.generation_config.cache_implementation = "static" tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) @require_read_token def test_model_2b_bf16(self): model_id = "google/gemma-2b" EXPECTED_TEXTS = [ "Hello I am doing a project on the 1990s and I need to know what the most popular music", "Hi today I am going to share with you a very easy and simple recipe of <strong><em>Khichdi", ] model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16).to( torch_device ) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) @require_read_token def test_model_2b_eager(self): model_id = "google/gemma-2b" EXPECTED_TEXTS = [ "Hello I am doing a project on the 1990s and I need to know what the most popular music", "Hi today I am going to share with you a very easy and simple recipe of <strong><em>Khichdi", ] model = AutoModelForCausalLM.from_pretrained( model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, attn_implementation="eager" ) model.to(torch_device) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) @require_torch_sdpa @require_read_token def test_model_2b_sdpa(self): model_id = "google/gemma-2b" EXPECTED_TEXTS = [ "Hello I am doing a project on the 1990s and I need to know what the most popular music", "Hi today I am going to share with you a very easy and simple recipe of <strong><em>Khichdi", ] model = AutoModelForCausalLM.from_pretrained( model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, attn_implementation="sdpa" ) model.to(torch_device) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) @require_flash_attn @require_read_token @pytest.mark.flash_attn_test def test_model_2b_flash_attn(self): model_id = "google/gemma-2b" EXPECTED_TEXTS = [ "Hello I am doing a project on the 1990s and I need to know what the most popular music", "Hi today I am going to share with you a very easy and simple recipe of <strong><em>Kaju Kat", ] model = AutoModelForCausalLM.from_pretrained( model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model.to(torch_device) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, 
max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) @require_bitsandbytes @require_read_token def test_model_2b_4bit(self): model_id = "google/gemma-2b" EXPECTED_TEXTS = [ "Hello I am doing a project and I need to make a 3d model of a house. I have been using", "Hi today I'd like to share with you my experience with the new wattpad wattpad wattpad wattpad wattpad wattpad wattpad", ] model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) @unittest.skip(reason="The test will not fit our CI runners") @require_read_token def test_model_7b_fp32(self): model_id = "google/gemma-7b" EXPECTED_TEXTS = [ "Hello my name is ***** ***** I will be assisting you today. I am sorry to hear about your issue. I will", "Hi,\n\nI have a problem with my 2005 1.6 16", ] model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True).to(torch_device) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) @require_read_token def test_model_7b_fp16(self): if self.cuda_compute_capability_major_version == 7: self.skipTest("This test is failing (`torch.compile` fails) on Nvidia T4 GPU (OOM).") model_id = "google/gemma-7b" EXPECTED_TEXTS = [ """Hello I am doing a project on a 1999 4.0L 4x4. I""", "Hi today I am going to show you how to make a simple and easy to make a DIY 3D", ] model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, torch_dtype=torch.float16).to( torch_device ) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) @require_read_token def test_model_7b_bf16(self): if self.cuda_compute_capability_major_version == 7: self.skipTest("This test is failing (`torch.compile` fails) on Nvidia T4 GPU (OOM).") model_id = "google/gemma-7b" # Key 9 for MI300, Key 8 for A100/A10, and Key 7 for T4. # # Note: Key 9 is currently set for MI300, but may need potential future adjustments for H100s, # considering differences in hardware processing and potential deviations in generated text. 
EXPECTED_TEXTS = { 7: [ """Hello I am doing a project on a 1991 240sx and I am trying to find""", "Hi today I am going to show you how to make a very simple and easy to make a very simple and", ], 8: [ "Hello I am doing a project for my school and I am trying to make a program that will read a .txt file", "Hi today I am going to show you how to make a very simple and easy to make a very simple and", ], 9: [ "Hello I am doing a project for my school and I am trying to get a servo to move a certain amount of degrees", "Hi today I am going to show you how to make a very simple and easy to make DIY light up sign", ], } model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16).to( torch_device ) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS[self.cuda_compute_capability_major_version]) @require_read_token def test_model_7b_fp16_static_cache(self): if self.cuda_compute_capability_major_version == 7: self.skipTest("This test is failing (`torch.compile` fails) on Nvidia T4 GPU (OOM).") model_id = "google/gemma-7b" EXPECTED_TEXTS = [ """Hello I am doing a project on a 1999 4.0L 4x4. I""", "Hi today I am going to show you how to make a simple and easy to make a DIY 3D", ] model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, torch_dtype=torch.float16).to( torch_device ) model.generation_config.cache_implementation = "static" tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) @require_bitsandbytes @require_read_token def test_model_7b_4bit(self): model_id = "google/gemma-7b" EXPECTED_TEXTS = [ "Hello I am doing a project for my school and I am trying to make a program that will take a number and then", "Hi today I am going to talk about the best way to get rid of acne. miniaturing is a very", ] model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS) @slow @require_torch_gpu @require_read_token def test_compile_static_cache(self): # `torch==2.2` will throw an error on this test (as in other compilation tests), but torch==2.1.2 and torch>2.2 # work as intended. See https://github.com/pytorch/pytorch/issues/121943 if version.parse(torch.__version__) < version.parse("2.3.0"): self.skipTest(reason="This test requires torch >= 2.3 to run.") NUM_TOKENS_TO_GENERATE = 40 EXPECTED_TEXT_COMPLETION = [ "Hello I am doing a project on the 1990s and I need to know what the most popular music was in the 1990s. 
I have looked on the internet and I have found", "Hi today\nI have a problem with my 2007 1.9 tdi 105bhp.\nI have a problem with the engine management light on.\nI have checked the", ] prompts = ["Hello I am doing", "Hi today"] tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b", pad_token="</s>", padding_side="right") model = GemmaForCausalLM.from_pretrained("google/gemma-2b", device_map=torch_device, torch_dtype=torch.float16) inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) # Dynamic Cache generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False) dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text) # Both GPU architectures have the same output # Static Cache generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static" ) static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text) # Static Cache + compile model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static" ) static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text) @slow @require_read_token def test_export_static_cache(self): if version.parse(torch.__version__) < version.parse("2.3.0"): self.skipTest(reason="This test requires torch >= 2.3 to run.") from transformers.integrations.executorch import ( TorchExportableModuleWithStaticCache, convert_and_export_with_cache, ) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b", pad_token="</s>", padding_side="right") EXPECTED_TEXT_COMPLETION = [ "Hello I am doing a project on the 1990s and I need to know what the most popular music was in the 1990s. 
I have looked on the internet and I have found", ] max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[ "input_ids" ].shape[-1] # Load model device = "cpu" dtype = torch.bfloat16 cache_implementation = "static" attn_implementation = "sdpa" batch_size = 1 model = GemmaForCausalLM.from_pretrained( "google/gemma-2b", device_map=device, torch_dtype=dtype, attn_implementation=attn_implementation, generation_config=GenerationConfig( use_cache=True, cache_implementation=cache_implementation, max_length=max_generation_length, cache_config={ "batch_size": batch_size, "max_cache_len": max_generation_length, }, ), ) prompts = ["Hello I am doing"] prompt_tokens = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) prompt_token_ids = prompt_tokens["input_ids"] max_new_tokens = max_generation_length - prompt_token_ids.shape[-1] # Static Cache + eager eager_generated_ids = model.generate( **prompt_tokens, max_new_tokens=max_new_tokens, do_sample=False, cache_implementation=cache_implementation ) eager_generated_text = tokenizer.batch_decode(eager_generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, eager_generated_text) # Static Cache + export exported_program = convert_and_export_with_cache(model) ep_generated_ids = TorchExportableModuleWithStaticCache.generate( exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens ) ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text) def test_model_2b_bf16_dola(self): model_id = "google/gemma-2b" # ground truth text generated with dola_layers="low", repetition_penalty=1.2 EXPECTED_TEXTS = [ "Hello I am doing an experiment and need to get the mass of a block. The problem is, it has no scale", "Hi today we have the review for a <strong>2016/2017</strong> season of", ] model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16).to( torch_device ) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate( **inputs, max_new_tokens=20, do_sample=False, dola_layers="low", repetition_penalty=1.2 ) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXTS)
transformers/tests/models/gemma/test_modeling_gemma.py/0
{ "file_path": "transformers/tests/models/gemma/test_modeling_gemma.py", "repo_id": "transformers", "token_count": 16364 }
172
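The Gemma integration tests above repeatedly exercise one generation pattern: greedy decoding with the default dynamic KV cache, with a pre-allocated static cache, and with the static cache plus a compiled forward pass. The sketch below condenses that pattern into a standalone script; it assumes torch >= 2.3 and access to the gated google/gemma-2b checkpoint (any causal LM checkpoint you can load works the same way).

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "google/gemma-2b"  # gated; substitute any causal LM checkpoint you have access to
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

inputs = tokenizer(["Hello I am doing"], return_tensors="pt").to(model.device)

# 1) Default dynamic KV cache.
dynamic_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False)

# 2) Static KV cache: key/value tensors are pre-allocated up to a fixed maximum length.
static_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False, cache_implementation="static")

# 3) Static cache + compiled forward, the combination test_compile_static_cache checks.
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
compiled_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False, cache_implementation="static")

# Greedy decoding should produce the same text in all three setups.
decoded = [tokenizer.batch_decode(ids, skip_special_tokens=True)[0] for ids in (dynamic_ids, static_ids, compiled_ids)]
assert decoded[0] == decoded[1] == decoded[2]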
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES from transformers.testing_utils import require_jinja, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "openai-community/gpt2" tokenizer_class = GPT2Tokenizer rust_tokenizer_class = GPT2TokenizerFast test_rust_tokenizer = True from_pretrained_kwargs = {"add_prefix_space": True} test_seq2seq = False def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map) text = "lower newer" bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] tokens = tokenizer.tokenize(text, add_prefix_space=True) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True) sequence = "lower newer" # Testing tokenization tokens = tokenizer.tokenize(sequence, add_prefix_space=True) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) # Testing conversion to ids without special tokens ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True) 
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) # Testing conversion to ids with special tokens rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True) ids = tokenizer.encode(sequence, add_prefix_space=True) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) # Testing the unknown token input_tokens = tokens + [rust_tokenizer.unk_token] input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) @unittest.skip def test_pretokenized_inputs(self, *args, **kwargs): # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def test_padding(self, max_length=15): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Simple input s = "This is a simple input" s2 = ["This is a simple input 1", "This is a simple input 2"] p = ("This is a simple input", "This is a pair") p2 = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", ) # Pair input self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", ) def test_padding_if_pad_token_set_slow(self): tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>") # Simple input s = "This is a simple input" s2 = ["This is a simple input looooooooong", "This is a simple input"] p = ("This is a simple input", "This is a pair") p2 = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] pad_token_id = tokenizer.pad_token_id out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np") out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np") out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np") out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np") # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1], 30) self.assertTrue(pad_token_id in out_s["input_ids"]) self.assertTrue(0 in out_s["attention_mask"]) # s2 # test automatic padding self.assertEqual(out_s2["input_ids"].shape[-1], 33) # long slice doesn't have padding self.assertFalse(pad_token_id in out_s2["input_ids"][0]) self.assertFalse(0 in out_s2["attention_mask"][0]) # short slice does have padding self.assertTrue(pad_token_id in out_s2["input_ids"][1]) self.assertTrue(0 in out_s2["attention_mask"][1]) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1], 60) self.assertTrue(pad_token_id in out_p["input_ids"]) 
self.assertTrue(0 in out_p["attention_mask"]) # p2 # test automatic padding pair self.assertEqual(out_p2["input_ids"].shape[-1], 52) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_p2["input_ids"][0]) self.assertFalse(0 in out_p2["attention_mask"][0]) # short slice pair does have padding self.assertTrue(pad_token_id in out_p2["input_ids"][1]) self.assertTrue(0 in out_p2["attention_mask"][1]) def test_add_bos_token_slow(self): bos_token = "$$$" tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True) s = "This is a simple input" s2 = ["This is a simple input 1", "This is a simple input 2"] bos_token_id = tokenizer.bos_token_id out_s = tokenizer(s) out_s2 = tokenizer(s2) self.assertEqual(out_s.input_ids[0], bos_token_id) self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids)) decode_s = tokenizer.decode(out_s.input_ids) decode_s2 = tokenizer.batch_decode(out_s2.input_ids) self.assertTrue(decode_s.startswith(bos_token)) self.assertTrue(all(d.startswith(bos_token) for d in decode_s2)) @unittest.skip(reason="tokenizer has no padding token") def test_padding_different_model_input_name(self): pass def test_special_tokens_mask_input_pairs_and_bos_token(self): # TODO: change to self.get_tokenizers() when the fast version is implemented tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)] for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence_0 = "Encode this." sequence_1 = "This one too please." encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False) encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True, ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special) ] filtered_sequence = [x for x in filtered_sequence if x is not None] self.assertEqual(encoded_sequence, filtered_sequence) @require_jinja def test_tokenization_for_chat(self): tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname) tokenizer.chat_template = "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}" test_chats = [ [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}], [ {"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Nice to meet you."}, ], [{"role": "assistant", "content": "Nice to meet you."}, {"role": "user", "content": "Hello!"}], ] tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats] # fmt: off expected_tokens = [[20, 1, 20, 10, 20, 4, 3, 10, 20, 10, 20, 3, 0, 20, 20, 20, 0, 10, 20, 20, 20, 6, 20, 1, 6, 20, 20, 20, 3, 0, 0, 1, 20, 20], [20, 1, 20, 10, 20, 4, 3, 10, 20, 10, 20, 3, 0, 20, 20, 20, 0, 10, 20, 20, 20, 6, 20, 1, 6, 20, 20, 20, 3, 0, 0, 1, 20, 20, 20, 7, 20, 3, 10, 6, 1, 10, 20, 3, 3, 6, 10, 20, 1, 20, 20, 20], [20, 7, 20, 3, 10, 6, 1, 10, 20, 3, 3, 6, 10, 20, 1, 20, 20, 20, 20, 3, 0, 0, 1, 20, 20]] # fmt: on for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens): self.assertListEqual(tokenized_chat, expected_tokens) 
@require_tokenizers class OPTTokenizationTest(unittest.TestCase): def test_serialize_deserialize_fast_opt(self): # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True) text = "A photo of a cat" tokens_ids = tokenizer.encode( text, ) self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758]) tokenizer.save_pretrained("test_opt") tokenizer = AutoTokenizer.from_pretrained("./test_opt") tokens_ids = tokenizer.encode( text, ) self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758]) def test_fast_slow_equivalence(self): tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True) text = "A photo of a cat" tokens_ids = tokenizer.encode( text, ) # Same as above self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758]) @unittest.skip(reason="This test is failing because of a bug in the fast tokenizer") def test_users_can_modify_bos(self): tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True) tokenizer.bos_token = "bos" tokenizer.bos_token_id = tokenizer.get_vocab()["bos"] text = "A photo of a cat" tokens_ids = tokenizer.encode( text, ) # We changed the bos token self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758]) tokenizer.save_pretrained("./tok") tokenizer = AutoTokenizer.from_pretrained("./tok") self.assertTrue(tokenizer.is_fast) tokens_ids = tokenizer.encode( text, ) self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
transformers/tests/models/gpt2/test_tokenization_gpt2.py/0
{ "file_path": "transformers/tests/models/gpt2/test_tokenization_gpt2.py", "repo_id": "transformers", "token_count": 7058 }
173
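The GPT-2 tokenizer tests above build a toy byte-level BPE vocabulary and merges file on disk and load a GPT2Tokenizer from them. The sketch below reproduces that setup outside the test harness, using the same toy vocab and the token/id expectations asserted in the test; the temporary paths are illustrative.

import json
import os
import tempfile

from transformers import GPT2Tokenizer

vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
    "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.json")
merges_file = os.path.join(tmpdir, "merges.txt")
with open(vocab_file, "w", encoding="utf-8") as fp:
    json.dump(dict(zip(vocab, range(len(vocab)))), fp)
with open(merges_file, "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))

tokenizer = GPT2Tokenizer(vocab_file, merges_file, unk_token="<unk>")

# "\u0120" marks a leading space in byte-level BPE, so with add_prefix_space=True the word
# "lower" becomes the merged token "\u0120low" followed by "er".
tokens = tokenizer.tokenize("lower newer", add_prefix_space=True)
assert tokens == ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
assert tokenizer.convert_tokens_to_ids(tokens) == [14, 15, 10, 9, 3, 2, 15]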
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import unittest from transformers import GPTJConfig, is_torch_available from transformers.testing_utils import ( require_torch, slow, tooslow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, GPTJForCausalLM, GPTJForQuestionAnswering, GPTJForSequenceClassification, GPTJModel, ) class GPTJModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.rotary_dim = rotary_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def get_large_model_config(self): return GPTJConfig.from_pretrained("EleutherAI/gpt-j-6B") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) 
choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config(self): return GPTJConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def create_and_check_gptj_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTJModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_gptj_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTJModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gptj_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPTJModel(config=config) model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past = model(input_ids, 
attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gptj_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPTJModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTJForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False ): model = GPTJForCausalLM(config) if gradient_checkpointing: model.gradient_checkpointing_enable() 
model.to(torch_device) result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask} return config, inputs_dict @require_torch class GPTJModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (GPTJModel, GPTJForCausalLM, GPTJForSequenceClassification, GPTJForQuestionAnswering) if is_torch_available() else () ) all_generative_model_classes = (GPTJForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GPTJModel, "question-answering": GPTJForQuestionAnswering, "text-classification": GPTJForSequenceClassification, "text-generation": GPTJForCausalLM, "zero-shot": GPTJForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_missing_keys = False test_model_parallel = False test_head_masking = False def test_torch_fx(self): super().test_torch_fx() def test_torch_fx_output_loss(self): super().test_torch_fx_output_loss() # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if ( pipeline_test_case_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False # special case for DoubleHeads model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) return inputs_dict def setUp(self): self.model_tester = GPTJModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTJConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_gptj_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model(*config_and_inputs) def test_gptj_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model_past(*config_and_inputs) def test_gptj_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model_attention_mask_past(*config_and_inputs) def test_gptj_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model_past_large_inputs(*config_and_inputs) def test_gptj_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_gptj_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) @tooslow def test_batch_generation(self): # Marked as @tooslow due to GPU OOM model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", revision="float16", torch_dtype=torch.float16) model.to(torch_device) tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B", revision="float16") tokenizer.padding_side = "left" # Define PAD Token = EOS Token = 50256 tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = model.config.eos_token_id # use different length sentences to test batching sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) token_type_ids = torch.cat( [ input_ids.new_full((input_ids.shape[0], input_ids.shape[1] - 1), 0), input_ids.new_full((input_ids.shape[0], 1), 500), ], dim=-1, ) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), ) outputs_tt = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), token_type_ids=token_type_ids, ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) batch_out_sentence_tt = tokenizer.batch_decode(outputs_tt, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = 
tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little over a year old and has been diagnosed with a heart murmur", "Today, I’m going to talk about the most important thing in the", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertTrue(batch_out_sentence_tt != batch_out_sentence) # token_type_ids should change output self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_model_from_pretrained(self): model_name = "EleutherAI/gpt-j-6B" model = GPTJModel.from_pretrained(model_name, revision="float16", torch_dtype=torch.float16) self.assertIsNotNone(model) @require_torch class GPTJModelLanguageGenerationTest(unittest.TestCase): @tooslow def test_lm_generate_gptj(self): # Marked as @tooslow due to GPU OOM for checkpointing in [True, False]: model = GPTJForCausalLM.from_pretrained( "EleutherAI/gpt-j-6B", revision="float16", torch_dtype=torch.float16 ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(torch_device) input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device) # The dog # The dog is a man's best friend. It is a loyal companion, and it is a friend expected_output_ids = [464, 3290, 318, 257, 582, 338, 1266, 1545, 13, 632, 318, 257, 9112, 15185, 11, 290, 340, 318, 257, 1545] # fmt: skip output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].tolist(), expected_output_ids) @tooslow def test_gptj_sample(self): # Marked as @tooslow due to GPU OOM (issue #13676) tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B", revision="float16") model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", revision="float16", torch_dtype=torch.float16) model.to(torch_device) torch.manual_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True) input_ids = tokenized.input_ids.to(torch_device) output_ids = model.generate(input_ids, do_sample=True) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) token_type_ids = tokenized.token_type_ids.to(torch_device) output_seq = model.generate(input_ids=input_ids, do_sample=True, num_return_sequences=5) output_seq_tt = model.generate( input_ids=input_ids, token_type_ids=token_type_ids, do_sample=True, num_return_sequences=5 ) output_seq_strs = tokenizer.batch_decode(output_seq, skip_special_tokens=True) output_seq_tt_strs = tokenizer.batch_decode(output_seq_tt, skip_special_tokens=True) if torch_device != "cpu": # currently this expect value is only for `cuda` EXPECTED_OUTPUT_STR = ( "Today is a nice day and I've already been enjoying it. I walked to work with my wife" ) else: EXPECTED_OUTPUT_STR = "Today is a nice day and one of those days that feels a bit more alive. 
I am ready" self.assertEqual(output_str, EXPECTED_OUTPUT_STR) self.assertTrue( all(output_seq_strs[idx] != output_seq_tt_strs[idx] for idx in range(len(output_seq_tt_strs))) ) # token_type_ids should change output @slow def test_gptj_sample_max_time(self): tokenizer = AutoTokenizer.from_pretrained("anton-l/gpt-j-tiny-random") model = GPTJForCausalLM.from_pretrained("anton-l/gpt-j-tiny-random") model.to(torch_device) torch.manual_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True) input_ids = tokenized.input_ids.to(torch_device) MAX_TIME = 0.5 start = datetime.datetime.now() model.generate(input_ids, do_sample=True, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, num_beams=2, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=True, num_beams=2, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, max_time=None, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) @tooslow def test_contrastive_search_gptj(self): article = ( "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and " "research laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based" ) tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B") model = GPTJForCausalLM.from_pretrained( "EleutherAI/gpt-j-6B", revision="float16", torch_dtype=torch.float16 ).to(torch_device) input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate(input_ids, penalty_alpha=0.6, top_k=4, max_length=256) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. 
The company is based in London, " "United Kingdom with offices in Mountain View, San Francisco, New York City, Paris, Tokyo, Seoul, " "Beijing, Singapore, Tel Aviv, Dublin, Sydney, and Melbourne.[1]\n\nContents\n\nIn 2010, Google's " "parent company, Alphabet, announced a $500 million investment in DeepMind, with the aim of creating " "a company that would apply deep learning to problems in healthcare, energy, transportation, and " "other areas.[2]\n\nOn April 23, 2014, Google announced that it had acquired DeepMind for $400 " "million in cash and stock.[3] The acquisition was seen as a way for Google to enter the " "fast-growing field of artificial intelligence (AI), which it had so far avoided due to concerns " 'about ethical and social implications.[4] Google co-founder Sergey Brin said that he was "thrilled" ' 'to have acquired DeepMind, and that it would "help us push the boundaries of AI even further."' "[5]\n\nDeepMind's founders, Demis Hassabis and Mustafa Suleyman, were joined by a number of Google " "employees" ], )
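# A minimal sketch (not executed by this test suite; checkpoint names mirror the ones used
# above) of the left-padded batched generation pattern the batch-generation tests rely on.
# Decoder-only models such as GPT-J need left padding and an explicit pad token (commonly
# reusing EOS) to batch prompts of different lengths:
#
#     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#     tokenizer.padding_side = "left"
#     tokenizer.pad_token = tokenizer.eos_token
#     model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", torch_dtype=torch.float16).to(torch_device)
#     model.config.pad_token_id = model.config.eos_token_id
#     inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True).to(torch_device)
#     generated = model.generate(**inputs, max_new_tokens=20)
#     print(tokenizer.batch_decode(generated, skip_special_tokens=True))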
transformers/tests/models/gptj/test_modeling_gptj.py/0
{ "file_path": "transformers/tests/models/gptj/test_modeling_gptj.py", "repo_id": "transformers", "token_count": 11982 }
174
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, Allegro.pl and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import HerbertTokenizer, HerbertTokenizerFast from transformers.models.herbert.tokenization_herbert import VOCAB_FILES_NAMES from transformers.testing_utils import get_tests_dir, require_sacremoses, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_sacremoses @require_tokenizers class HerbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "allegro/herbert-base-cased" tokenizer_class = HerbertTokenizer rust_tokenizer_class = HerbertTokenizerFast test_rust_tokenizer = True def setUp(self): super().setUp() # Use a simpler test file without japanese/chinese characters with open(f"{get_tests_dir()}/fixtures/sample_text_no_unicode.txt", encoding="utf-8") as f_data: self._data = f_data.read().replace("\n\n", "\n").strip() vocab = [ "<s>", "</s>", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", ",</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(vocab_file=self.vocab_file, merges_file=self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [16, 17, 23] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "lower,newer" tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("allegro/herbert-base-cased") text = tokenizer.encode("konstruowanie sekwencji", add_special_tokens=False) text_2 = 
tokenizer.encode("konstruowanie wielu sekwencji", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [0] + text + [2] assert encoded_pair == [0] + text + [2] + text_2 + [2] @unittest.skip( "Test passes if run individually but not with the full tests (internal state of the tokenizer is modified). Will fix later" ) def test_training_new_tokenizer_with_special_tokens_change(self): pass @unittest.skip( "Test passes if run individually but not with the full tests (internal state of the tokenizer is modified). Will fix later" ) def test_training_new_tokenizer(self): pass
transformers/tests/models/herbert/test_tokenization_herbert.py/0
{ "file_path": "transformers/tests/models/herbert/test_tokenization_herbert.py", "repo_id": "transformers", "token_count": 2218 }
175
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest from io import BytesIO from typing import Optional import requests from transformers import Idefics2Processor from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, Idefics2Processor, ) @require_torch @require_vision class Idefics2ProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Idefics2Processor def setUp(self): self.tmpdirname = tempfile.mkdtemp() processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b", image_seq_len=2) processor.save_pretrained(self.tmpdirname) self.image1 = Image.open( BytesIO( requests.get( "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" ).content ) ) self.image2 = Image.open( BytesIO(requests.get("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg").content) ) self.image3 = Image.open( BytesIO( requests.get( "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg" ).content ) ) self.bos_token = processor.tokenizer.bos_token self.image_token = processor.image_token.content self.fake_image_token = processor.fake_image_token.content self.bos_token_id = processor.tokenizer.convert_tokens_to_ids(self.bos_token) self.image_token_id = processor.tokenizer.convert_tokens_to_ids(self.image_token) self.fake_image_token_id = processor.tokenizer.convert_tokens_to_ids(self.fake_image_token) self.image_seq_len = processor.image_seq_len def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def get_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_process_interleaved_images_prompts_no_image_splitting(self): tokenizer = self.get_tokenizer() processor = self.get_processor() processor.image_processor.do_image_splitting = False # Test that a single image is processed correctly inputs = processor(images=self.image1) self.assertEqual(inputs["pixel_values"].shape, (1, 1, 3, 653, 980)) self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 1, 653, 980)) # fmt: on # Test a single sample with image and text image_str = "<image>" text_str = "In this image, we see" text = image_str + text_str inputs = processor(text=text, images=self.image1) # fmt: off tokenized_sentence = tokenizer(text_str, add_special_tokens=False) expected_input_ids = [[self.bos_token_id] + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] + tokenized_sentence["input_ids"]] 
self.assertEqual(inputs["input_ids"], expected_input_ids) self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])]) self.assertEqual(inputs["pixel_values"].shape, (1, 1, 3, 653, 980)) self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 1, 653, 980)) # fmt: on # Test that batch is correctly processed image_str = "<image>" text_str_1 = "In this image, we see" text_str_2 = "bla, bla" text = [ image_str + text_str_1, text_str_2 + image_str + image_str, ] images = [[self.image1], [self.image2, self.image3]] inputs = processor(text=text, images=images, padding=True) # fmt: off tokenized_sentence_1 = tokenizer(text_str_1, add_special_tokens=False) tokenized_sentence_2 = tokenizer(text_str_2, add_special_tokens=False) expected_input_ids_1 = [self.bos_token_id] + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] + tokenized_sentence_1["input_ids"] expected_input_ids_2 = [self.bos_token_id] + tokenized_sentence_2["input_ids"] + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] # Pad the first input to match the second input pad_len = len(expected_input_ids_2) - len(expected_input_ids_1) padded_expected_input_ids_1 = [0] * pad_len + expected_input_ids_1 self.assertEqual( inputs["input_ids"], [padded_expected_input_ids_1, expected_input_ids_2] ) self.assertEqual( inputs["attention_mask"], [[0] * pad_len + [1] * len(expected_input_ids_1), [1] * len(expected_input_ids_2)] ) self.assertEqual(inputs['pixel_values'].shape, (2, 2, 3, 767, 980)) self.assertEqual(inputs['pixel_attention_mask'].shape, (2, 2, 767, 980)) # fmt: on def test_process_interleaved_images_prompts_image_splitting(self): processor = self.get_processor() tokenizer = self.get_tokenizer() processor.image_processor.do_image_splitting = True # Test that a single image is processed correctly inputs = processor(images=self.image1) self.assertEqual(inputs["pixel_values"].shape, (1, 5, 3, 653, 980)) self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 5, 653, 980)) # fmt: on # Test a single sample with image and text image_str = "<image>" text_str = "In this image, we see" text = image_str + text_str inputs = processor(text=text, images=self.image1) # fmt: off tokenized_sentence = tokenizer(text_str, add_special_tokens=False) expected_input_ids = [[self.bos_token_id] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + [self.fake_image_token_id] + tokenized_sentence["input_ids"]] self.assertEqual(inputs["input_ids"], expected_input_ids) self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])]) self.assertEqual(inputs["pixel_values"].shape, (1, 5, 3, 653, 980)) self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 5, 653, 980)) # fmt: on # Test that batch is correctly processed image_str = "<image>" text_str_1 = "In this image, we see" text_str_2 = "bla, bla" text = [ image_str + text_str_1, text_str_2 + image_str + image_str, ] images = [[self.image1], [self.image2, self.image3]] inputs = processor(text=text, images=images, padding=True) # fmt: off tokenized_sentence_1 = tokenizer(text_str_1, add_special_tokens=False) tokenized_sentence_2 = tokenizer(text_str_2, add_special_tokens=False) expected_input_ids_1 = [self.bos_token_id] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + [self.fake_image_token_id] + tokenized_sentence_1["input_ids"] 
expected_input_ids_2 = [self.bos_token_id] + tokenized_sentence_2["input_ids"] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + [self.fake_image_token_id] # Pad the first input to match the second input pad_len = len(expected_input_ids_2) - len(expected_input_ids_1) padded_expected_input_ids_1 = [0] * pad_len + expected_input_ids_1 self.assertEqual( inputs["input_ids"], [padded_expected_input_ids_1, expected_input_ids_2] ) self.assertEqual( inputs["attention_mask"], [[0] * pad_len + [1] * len(expected_input_ids_1), [1] * len(expected_input_ids_2)] ) self.assertEqual(inputs['pixel_values'].shape, (2, 10, 3, 767, 980)) self.assertEqual(inputs['pixel_attention_mask'].shape, (2, 10, 767, 980)) # fmt: on def test_add_special_tokens_processor(self): processor = self.get_processor() tokenizer = self.get_tokenizer() image_str = "<image>" text_str = "In this image, we see" text = text_str + image_str n_image_repeat = 5 if processor.image_processor.do_image_splitting else 1 # fmt: off inputs = processor(text=text, images=self.image1, add_special_tokens=False) tokenized_sentence = tokenizer(text_str, add_special_tokens=False) expected_input_ids = [tokenized_sentence["input_ids"] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * n_image_repeat + [self.fake_image_token_id]] self.assertEqual(inputs["input_ids"], expected_input_ids) inputs = processor(text=text, images=self.image1) expected_input_ids = [[self.bos_token_id] + tokenized_sentence["input_ids"] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * n_image_repeat + [self.fake_image_token_id]] self.assertEqual(inputs["input_ids"], expected_input_ids) # fmt: on def test_non_nested_images_with_batched_text(self): processor = self.get_processor() processor.image_processor.do_image_splitting = False image_str = "<image>" text_str_1 = "In this image, we see" text_str_2 = "bla, bla" text = [ image_str + text_str_1, text_str_2 + image_str + image_str, ] images = [self.image1, self.image2, self.image3] inputs = processor(text=text, images=images, padding=True) self.assertEqual(inputs["pixel_values"].shape, (2, 2, 3, 767, 980)) self.assertEqual(inputs["pixel_attention_mask"].shape, (2, 2, 767, 980)) def test_process_interleaved_images_prompts_image_error(self): processor = self.get_processor() text = [ "This is a test sentence.", "In this other sentence we try some good things", ] images = [[self.image1], [self.image2]] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) images = [[self.image1], []] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) text = [ "This is a test sentence.<image>", "In this other sentence we try some good things<image>", ] images = [[self.image1], [self.image2, self.image3]] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) images = [[], [self.image2]] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) images = [self.image1, self.image2, self.image3] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) images = [self.image1] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) text = [ "This is a test sentence.", "In this other sentence we try some good things<image>", ] images = [[self.image1], []] with self.assertRaises(ValueError): processor(text=text, 
images=images, padding=True)

        images = [[], [self.image2]]
        with self.assertRaises(ValueError):
            processor(text=text, images=images, padding=True)

        images = [self.image1, self.image2]
        with self.assertRaises(ValueError):
            processor(text=text, images=images, padding=True)

        images = [self.image1]
        with self.assertRaises(ValueError):
            processor(text=text, images=images, padding=True)

    def test_apply_chat_template(self):
        # Message contains content which is a mix of lists with images and image urls and string
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What do these images show?"},
                    {"type": "image"},
                    {"type": "image"},
                    "What do these images show?",
                ],
            },
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "text",
                        "text": "The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.",
                    }
                ],
            },
            {"role": "user", "content": [{"type": "text", "text": "And who is that?"}]},
        ]

        processor = self.get_processor()
        # Make short sequence length to test that the fake tokens are added correctly
        rendered = processor.apply_chat_template(messages, add_generation_prompt=True)

        expected_rendered = (
            "User: What do these images show?<image><image><end_of_utterance>\n"
            "Assistant: The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.<end_of_utterance>\n"
            "User: And who is that?<end_of_utterance>\n"
            "Assistant:"
        )
        self.assertEqual(rendered, expected_rendered)

    # Override as Idefics2Processor needs image tokens in prompts
    def prepare_text_inputs(self, batch_size: Optional[int] = None):
        if batch_size is None:
            return "lower newer <image>"
        if batch_size < 1:
            raise ValueError("batch_size must be greater than 0")
        if batch_size == 1:
            return ["lower newer <image>"]
        return ["lower newer <image>", "<image> upper older longer string"] + ["<image> lower newer"] * (
            batch_size - 2
        )
transformers/tests/models/idefics2/test_processor_idefics2.py/0
{ "file_path": "transformers/tests/models/idefics2/test_processor_idefics2.py", "repo_id": "transformers", "token_count": 6564 }
176
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import InstructBlipVideoImageProcessor class InstructBlipVideoProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=5, num_channels=3, image_size=24, min_resolution=30, max_resolution=80, do_resize=True, size=None, do_normalize=True, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD, do_convert_rgb=True, frames=4, ): super().__init__() size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb self.frames = frames def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def expected_output_image_shape(self, images): return self.frames, self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): images = prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) # let's simply copy the frames to fake a long video-clip if numpify or torchify: videos = [] for image in images: if numpify: video = image[None, ...].repeat(self.frames, 0) else: video = image[None, ...].repeat(self.frames, 1, 1, 1) videos.append(video) else: videos = [] for pil_image in images: videos.append([pil_image] * self.frames) return videos @require_torch @require_vision class InstructBlipVideoProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = InstructBlipVideoImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = InstructBlipVideoProcessingTester(self) @property # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) 
self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for video in video_inputs: self.assertIsInstance(video[0], Image.Image) # Test not batched input (pass as `videos` arg to test that ImageProcessor can handle videos in absence of images!) encoded_videos = image_processing(images=video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = (1, 4, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = image_processing(images=video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = (5, 4, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for video in video_inputs: self.assertIsInstance(video, np.ndarray) # Test not batched input (pass as `videos` arg to test that ImageProcessor can handle videos in absence of images!) encoded_videos = image_processing(images=video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = (1, 4, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = image_processing(images=video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = (5, 4, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors video_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for video in video_inputs: self.assertIsInstance(video, torch.Tensor) # Test not batched input encoded_videos = image_processing(images=video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = (1, 4, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = image_processing(images=video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = (5, 4, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape)
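# A minimal sketch (shapes chosen arbitrarily) of building a fake channels-first video clip
# as a numpy array for this image processor, mirroring the frame-repetition trick used by
# prepare_image_inputs above:
#
#     import numpy as np
#     frame = np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8)  # (channels, height, width)
#     video = frame[None, ...].repeat(4, axis=0)                      # (frames, channels, height, width)
#     pixel_values = InstructBlipVideoImageProcessor()(images=video, return_tensors="pt").pixel_values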
transformers/tests/models/instructblipvideo/test_image_processing_instrictblipvideo.py/0
{ "file_path": "transformers/tests/models/instructblipvideo/test_image_processing_instrictblipvideo.py", "repo_id": "transformers", "token_count": 3222 }
177
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch LayoutLMv2 model.""" import unittest from transformers.testing_utils import ( require_detectron2, require_non_xpu, require_torch, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_detectron2_available, is_torch_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn.functional as F from transformers import ( LayoutLMv2Config, LayoutLMv2ForQuestionAnswering, LayoutLMv2ForSequenceClassification, LayoutLMv2ForTokenClassification, LayoutLMv2Model, ) if is_detectron2_available(): from detectron2.structures.image_list import ImageList class LayoutLMv2ModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, image_feature_pool_shape=[7, 7, 256], coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.image_feature_pool_shape = image_feature_pool_shape self.coordinate_size = coordinate_size self.shape_size = shape_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] 
bbox[i, j, 0] = t image = ImageList( torch.zeros(self.batch_size, self.num_channels, self.image_size, self.image_size, device=torch_device), self.image_size, ) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = LayoutLMv2Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, image_feature_pool_shape=self.image_feature_pool_shape, coordinate_size=self.coordinate_size, shape_size=self.shape_size, ) # use smaller resnet backbone to make tests faster config.detectron2_config_args["MODEL.RESNETS.DEPTH"] = 18 config.detectron2_config_args["MODEL.RESNETS.RES2_OUT_CHANNELS"] = 64 config.detectron2_config_args["MODEL.RESNETS.NUM_GROUPS"] = 1 return config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels def create_and_check_model( self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv2Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, image=image, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, image=image) # LayoutLMv2 has a different expected sequence length, namely also visual tokens are added expected_seq_len = self.seq_length + self.image_feature_pool_shape[0] * self.image_feature_pool_shape[1] self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_sequence_classification( self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv2ForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, 
token_labels ): model = LayoutLMv2ForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "image": image, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_non_xpu @require_torch @require_detectron2 class LayoutLMv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = True test_mismatched_shapes = False all_model_classes = ( ( LayoutLMv2Model, LayoutLMv2ForSequenceClassification, LayoutLMv2ForTokenClassification, LayoutLMv2ForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"document-question-answering": LayoutLMv2ForQuestionAnswering, "feature-extraction": LayoutLMv2Model} if is_torch_available() else {} ) def setUp(self): self.model_tester = LayoutLMv2ModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMv2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @require_torch_multi_gpu @unittest.skip( reason=( "LayoutLMV2 and its dependency `detectron2` have some layers using `add_module` which doesn't work well" " with `nn.DataParallel`" ) ) def test_multi_gpu_data_parallel_forward(self): pass def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # LayoutLMv2 has a different expected sequence length expected_seq_len = ( self.model_tester.seq_length + self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1] ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), 
self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # LayoutLMv2 has a different expected sequence length expected_seq_len = ( self.model_tester.seq_length + self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1] ) self.assertListEqual( list(hidden_states[0].shape[-2:]), [expected_seq_len, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="We cannot configure detectron2 to output a smaller backbone") def test_model_is_small(self): pass @slow def test_model_from_pretrained(self): model_name = "microsoft/layoutlmv2-base-uncased" model = LayoutLMv2Model.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "backbone" in name or "visual_segment_embedding" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_batching_equivalence(self): def equivalence(tensor1, tensor2): return 1.0 - F.cosine_similarity(tensor1.float().flatten(), tensor2.float().flatten(), dim=0, eps=0) def 
recursive_check(batched_object, single_row_object, model_name, key): if isinstance(batched_object, (list, tuple)): for batched_object_value, single_row_object_value in zip(batched_object, single_row_object): recursive_check(batched_object_value, single_row_object_value, model_name, key) elif batched_object is None: return else: batched_row = batched_object[:1] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}" ) self.assertTrue( (equivalence(batched_row, single_row_object)) <= 1e-03, msg=( f"Batched and Single row outputs are not equal in {model_name} for key={key}. " f"Difference={equivalence(batched_row, single_row_object)}." ), ) config, batched_input = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(config).to(torch_device).eval() batch_size = self.model_tester.batch_size single_row_input = {} for key, value in batched_input_prepared.items(): if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0: single_batch_shape = value.shape[0] // batch_size single_row_input[key] = value[:single_batch_shape] elif hasattr(value, "tensor"): # layoutlmv2uses ImageList intead of pixel values (needs for torchscript) single_row_input[key] = value.tensor[:single_batch_shape] with torch.no_grad(): model_batched_output = model(**batched_input_prepared) model_row_output = model(**single_row_input) for key in model_batched_output: recursive_check(model_batched_output[key], model_row_output[key], model_name, key) def prepare_layoutlmv2_batch_inputs(): # Here we prepare a batch of 2 sequences to test a LayoutLMv2 forward pass on: # fmt: off input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) # noqa: E231 bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) # noqa: E231 image = ImageList(torch.randn((2,3,224,224)), image_sizes=[(224,224), (224,224)]) # noqa: E231 attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231 token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231 # fmt: on return input_ids, bbox, image, attention_mask, token_type_ids @require_torch @require_detectron2 class LayoutLMv2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased").to(torch_device) ( input_ids, bbox, image, attention_mask, token_type_ids, ) = prepare_layoutlmv2_batch_inputs() # forward pass outputs = model( input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), image=image.to(torch_device), attention_mask=attention_mask.to(torch_device), token_type_ids=token_type_ids.to(torch_device), ) # verify the sequence output expected_shape = torch.Size( ( 2, input_ids.shape[1] + model.config.image_feature_pool_shape[0] * model.config.image_feature_pool_shape[1], model.config.hidden_size, ) ) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.1087, 0.0727, -0.3075], [0.0799, -0.0427, -0.0751], [-0.0367, 0.0480, -0.1358]], device=torch_device ) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-3, atol=1e-3) # verify the pooled output expected_shape = torch.Size((2, model.config.hidden_size)) self.assertEqual(outputs.pooler_output.shape, expected_shape)
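# A minimal sketch (the helper name is illustrative, not part of this file) of the 0-1000
# box normalization that the hard-coded `bbox` tensors above already assume: LayoutLMv2
# expects word bounding boxes rescaled to a 0-1000 coordinate system.
#
#     def normalize_bbox(bbox, width, height):
#         x0, y0, x1, y1 = bbox
#         return [
#             int(1000 * x0 / width),
#             int(1000 * y0 / height),
#             int(1000 * x1 / width),
#             int(1000 * y1 / height),
#         ]
#
#     width, height = image.size  # a PIL image of the document page
#     token_boxes = [normalize_bbox(box, width, height) for box in word_pixel_boxes]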
transformers/tests/models/layoutlmv2/test_modeling_layoutlmv2.py/0
{ "file_path": "transformers/tests/models/layoutlmv2/test_modeling_layoutlmv2.py", "repo_id": "transformers", "token_count": 11433 }
178