python_code | repo_name | file_path
---|---|---|
from setuptools import find_packages, setup
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import allennlp whilst setting up.
VERSION = {} # type: ignore
with open("allennlp/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
setup(
name="allennlp",
version=VERSION["VERSION"],
description="An open-source NLP research library, built on PyTorch.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="allennlp NLP deep learning machine reading",
url="https://github.com/allenai/allennlp",
author="Allen Institute for Artificial Intelligence",
author_email="allennlp@allenai.org",
license="Apache",
packages=find_packages(
exclude=[
"*.tests",
"*.tests.*",
"tests.*",
"tests",
"test_fixtures",
"test_fixtures.*",
"benchmarks",
"benchmarks.*",
]
),
install_requires=[
"torch>=1.6.0,<1.8.0",
"jsonnet>=0.10.0 ; sys.platform != 'win32'",
"overrides==3.1.0",
"nltk",
"spacy>=2.1.0,<2.4",
"numpy",
"tensorboardX>=1.2",
"boto3>=1.14,<2.0",
"requests>=2.18",
"tqdm>=4.19",
"h5py",
"scikit-learn",
"scipy",
"pytest",
"transformers>=4.0,<4.1",
"sentencepiece",
"jsonpickle",
"dataclasses;python_version<'3.7'",
"filelock>=3.0,<3.1",
],
entry_points={"console_scripts": ["allennlp=allennlp.__main__:run"]},
include_package_data=True,
python_requires=">=3.6.1",
zip_safe=False,
)
| allennlp-master | setup.py |
allennlp-master | test_fixtures/__init__.py |
|
from d.d import D
| allennlp-master | test_fixtures/plugins/d/__init__.py |
import argparse
from overrides import overrides
from allennlp.commands import Subcommand
def do_nothing(_):
pass
@Subcommand.register("d")
class D(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
subparser = parser.add_parser(self.name, description="fake", help="fake help")
subparser.set_defaults(func=do_nothing)
return subparser
| allennlp-master | test_fixtures/plugins/d/d.py |
import os
_MAJOR = "1"
_MINOR = "3"
# On master and in a nightly release the patch should be one ahead of the last
# released build.
_PATCH = "0"
# This is mainly for nightly builds which have the suffix ".dev$DATE". See
# https://semver.org/#is-v123-a-semantic-version for the semantics.
_SUFFIX = os.environ.get("ALLENNLP_VERSION_SUFFIX", "")
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
| allennlp-master | allennlp/version.py |
# Make sure that allennlp is running on Python 3.6.1 or later
# (to avoid running into this bug: https://bugs.python.org/issue29246)
import sys
if sys.version_info < (3, 6, 1):
raise RuntimeError("AllenNLP requires Python 3.6.1 or later")
# We get a lot of these spurious warnings,
# see https://github.com/ContinuumIO/anaconda-issues/issues/6678
import warnings # noqa
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
try:
# On some systems this prevents the dreaded
# ImportError: dlopen: cannot load any more object with static TLS
import transformers, spacy, torch, numpy # noqa
except ModuleNotFoundError:
print(
"Using AllenNLP requires the python packages Spacy, "
"Pytorch and Numpy to be installed. Please see "
"https://github.com/allenai/allennlp for installation instructions."
)
raise
from allennlp.version import VERSION as __version__ # noqa
| allennlp-master | allennlp/__init__.py |
#!/usr/bin/env python
import logging
import os
import sys
if os.environ.get("ALLENNLP_DEBUG"):
LEVEL = logging.DEBUG
else:
level_name = os.environ.get("ALLENNLP_LOG_LEVEL", "INFO")
LEVEL = logging._nameToLevel.get(level_name, logging.INFO)
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=LEVEL)
# filelock emits too many messages, so tell it to be quiet unless it has something
# important to say.
logging.getLogger("filelock").setLevel(logging.WARNING)
# transformers emits an annoying log message every time it's imported, so we filter that
# one message out specifically.
def _transformers_log_filter(record):
if record.msg.startswith("PyTorch version"):
return False
return True
logging.getLogger("transformers.file_utils").addFilter(_transformers_log_filter)
from allennlp.commands import main # noqa
def run():
main(prog="allennlp")
if __name__ == "__main__":
run()
| allennlp-master | allennlp/__main__.py |
allennlp-master | allennlp/tools/__init__.py |
|
import os
from allennlp.common.file_utils import CACHE_DIRECTORY
from allennlp.common.file_utils import filename_to_url
def main():
print(f"Looking for datasets in {CACHE_DIRECTORY}...")
if not os.path.exists(CACHE_DIRECTORY):
print("Directory does not exist.")
print("No cached datasets found.")
return
cached_files = os.listdir(CACHE_DIRECTORY)
if not cached_files:
print("Directory is empty.")
print("No cached datasets found.")
return
for filename in cached_files:
if not filename.endswith("json"):
url, etag = filename_to_url(filename)
print("Filename: %s" % filename)
print("Url: %s" % url)
print("ETag: %s" % etag)
print()
if __name__ == "__main__":
main()
| allennlp-master | allennlp/tools/inspect_cache.py |
#! /usr/bin/env python
"""
Helper script for modifying config.json files that are locked inside
model.tar.gz archives. This is useful if you need to rename things or
add or remove values, usually because of changes to the library.
This script will untar the archive to a temp directory, launch an editor
to modify the config.json, and then re-tar everything to a new archive.
If your $EDITOR environment variable is not set, you'll have to explicitly
specify which editor to use.
"""
import argparse
import atexit
import logging
import os
import shutil
import subprocess
import tempfile
import tarfile
from allennlp.common.file_utils import cached_path
from allennlp.models.archival import CONFIG_NAME
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
def main():
parser = argparse.ArgumentParser(
description="Perform surgery on a model.tar.gz archive",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--input-file", required=True, help="path to input file")
parser.add_argument(
"--editor",
default=os.environ.get("EDITOR"),
help="editor to launch, whose default value is `$EDITOR` the environment variable",
)
output = parser.add_mutually_exclusive_group()
output.add_argument("--output-file", help="path to output file")
output.add_argument(
"--inplace",
action="store_true",
help="overwrite the input file with the modified configuration",
)
parser.add_argument(
"-f", "--force", action="store_true", help="overwrite the output file if it exists"
)
args = parser.parse_args()
if args.editor is None:
raise RuntimeError("please specify an editor or set the $EDITOR environment variable")
if not args.inplace and args.output_file is None:
raise RuntimeError("please specify --output-file or use --inplace")
if not args.inplace and os.path.exists(args.output_file) and not args.force:
raise ValueError("output file already exists, use --force to override")
archive_file = cached_path(args.input_file)
if not os.path.exists(archive_file):
raise ValueError("input file doesn't exist")
if args.inplace:
output_file = archive_file
else:
output_file = args.output_file
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
with tarfile.open(archive_file, "r:gz") as archive:
archive.extractall(tempdir)
atexit.register(lambda: shutil.rmtree(tempdir))
config_path = os.path.join(tempdir, CONFIG_NAME)
subprocess.run([args.editor, config_path], check=False)
with tarfile.open(output_file, "w:gz") as tar:
tar.add(tempdir, arcname=os.path.sep)
if __name__ == "__main__":
main()
| allennlp-master | allennlp/tools/archive_surgery.py |
import argparse
import gzip
import os
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.vocabulary import DEFAULT_OOV_TOKEN
from allennlp.modules.elmo import _ElmoCharacterEncoder
def main(
vocab_path: str,
elmo_config_path: str,
elmo_weights_path: str,
output_dir: str,
batch_size: int,
device: int,
use_custom_oov_token: bool = False,
):
"""
Creates ELMo word representations from a vocabulary file. These
word representations are _independent_ - they are the result of running
the CNN and Highway layers of the ELMo model, but not the Bidirectional LSTM.
ELMo requires 2 additional tokens: <S> and </S>. The first token
in this file is assumed to be an unknown token.
This script produces two artifacts: A new vocabulary file
with the <S> and </S> tokens inserted and a glove formatted embedding
file containing word : vector pairs, one per line, with all values
separated by a space.
"""
# Load the vocabulary words and convert to char ids
with open(vocab_path, "r") as vocab_file:
tokens = vocab_file.read().strip().split("\n")
# Insert the sentence boundary tokens which elmo uses at positions 1 and 2.
if tokens[0] != DEFAULT_OOV_TOKEN and not use_custom_oov_token:
raise ConfigurationError("ELMo embeddings require the use of a OOV token.")
tokens = [tokens[0]] + ["<S>", "</S>"] + tokens[1:]
indexer = ELMoTokenCharactersIndexer()
indices = indexer.tokens_to_indices([Token(token) for token in tokens], Vocabulary())["tokens"]
sentences = []
for k in range((len(indices) // 50) + 1):
sentences.append(
indexer.as_padded_tensor_dict(
indices[(k * 50) : ((k + 1) * 50)], padding_lengths={"tokens": 50}
)
)
last_batch_remainder = 50 - (len(indices) % 50)
if device != -1:
elmo_token_embedder = _ElmoCharacterEncoder(elmo_config_path, elmo_weights_path).cuda(
device
)
else:
elmo_token_embedder = _ElmoCharacterEncoder(elmo_config_path, elmo_weights_path)
all_embeddings = []
for i in range((len(sentences) // batch_size) + 1):
batch = torch.stack(sentences[i * batch_size : (i + 1) * batch_size])
if device != -1:
batch = batch.cuda(device)
token_embedding = elmo_token_embedder(batch)["token_embedding"].data
# Reshape back to a list of words of shape (batch_size * 50, encoding_dim)
# We also need to remove the <S>, </S> tokens appended by the encoder.
per_word_embeddings = (
token_embedding[:, 1:-1, :].contiguous().view(-1, token_embedding.size(-1))
)
all_embeddings.append(per_word_embeddings)
# Remove the embeddings associated with padding in the last batch.
all_embeddings[-1] = all_embeddings[-1][:-last_batch_remainder, :]
embedding_weight = torch.cat(all_embeddings, 0).cpu().numpy()
# Write out the embedding in a glove format.
os.makedirs(output_dir, exist_ok=True)
with gzip.open(os.path.join(output_dir, "elmo_embeddings.txt.gz"), "wb") as embeddings_file:
for i, word in enumerate(tokens):
string_array = " ".join(str(x) for x in list(embedding_weight[i, :]))
embeddings_file.write(f"{word} {string_array}\n".encode("utf-8"))
# Write out the new vocab with the <S> and </S> tokens.
_, vocab_file_name = os.path.split(vocab_path)
with open(os.path.join(output_dir, vocab_file_name), "w") as new_vocab_file:
for word in tokens:
new_vocab_file.write(f"{word}\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate CNN representations for a vocabulary using ELMo",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--vocab_path",
type=str,
help="A path to a vocabulary file to generate representations for.",
)
parser.add_argument(
"--elmo_config", type=str, help="The path to the ELMo config (options) file."
)
parser.add_argument(
"--elmo_weights", type=str, help="The path to the ELMo weight file."
)
parser.add_argument(
"--output_dir", type=str, help="The output directory to store the serialised embeddings."
)
parser.add_argument("--batch_size", type=int, default=64, help="The batch size to use.")
parser.add_argument("--device", type=int, default=-1, help="The device to run on.")
parser.add_argument(
"--use_custom_oov_token",
type=bool,
default=False,
help="AllenNLP requires a particular OOV token."
"To generate embeddings with a custom OOV token,"
"add this flag.",
)
args = parser.parse_args()
main(
args.vocab_path,
args.elmo_config,
args.elmo_weights,
args.output_dir,
args.batch_size,
args.device,
args.use_custom_oov_token,
)
| allennlp-master | allennlp/tools/create_elmo_embeddings_from_vocab.py |
"""
Assorted utilities for working with neural networks in AllenNLP.
"""
import copy
import json
import logging
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
import math
import numpy
import torch
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__)
T = TypeVar("T")
def has_tensor(obj) -> bool:
"""
Given a possibly complex data structure,
check if it has any torch.Tensors in it.
"""
if isinstance(obj, torch.Tensor):
return True
elif isinstance(obj, dict):
return any(has_tensor(value) for value in obj.values())
elif isinstance(obj, (list, tuple)):
return any(has_tensor(item) for item in obj)
else:
return False
def move_to_device(obj, cuda_device: Union[torch.device, int]):
"""
Given a structure (possibly) containing Tensors on the CPU,
move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).
"""
from allennlp.common.util import int_to_device
cuda_device = int_to_device(cuda_device)
if cuda_device == torch.device("cpu") or not has_tensor(obj):
return obj
elif isinstance(obj, torch.Tensor):
return obj.cuda(cuda_device)
elif isinstance(obj, dict):
return {key: move_to_device(value, cuda_device) for key, value in obj.items()}
elif isinstance(obj, list):
return [move_to_device(item, cuda_device) for item in obj]
elif isinstance(obj, tuple) and hasattr(obj, "_fields"):
# This is the best way to detect a NamedTuple, it turns out.
return obj.__class__(*(move_to_device(item, cuda_device) for item in obj))
elif isinstance(obj, tuple):
return tuple(move_to_device(item, cuda_device) for item in obj)
else:
return obj
def clamp_tensor(tensor, minimum, maximum):
"""
Supports sparse and dense tensors.
Returns a tensor with values clamped between the provided minimum and maximum,
without modifying the original tensor.
"""
if tensor.is_sparse:
coalesced_tensor = tensor.coalesce()
coalesced_tensor._values().clamp_(minimum, maximum)
return coalesced_tensor
else:
return tensor.clamp(minimum, maximum)
def batch_tensor_dicts(
tensor_dicts: List[Dict[str, torch.Tensor]], remove_trailing_dimension: bool = False
) -> Dict[str, torch.Tensor]:
"""
Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys,
and returns a single dictionary with all tensors with the same key batched together.
# Parameters
tensor_dicts : `List[Dict[str, torch.Tensor]]`
The list of tensor dictionaries to batch.
remove_trailing_dimension : `bool`
If `True`, we will check for a trailing dimension of size 1 on the tensors that are being
batched, and remove it if we find it.
"""
key_to_tensors: Dict[str, List[torch.Tensor]] = defaultdict(list)
for tensor_dict in tensor_dicts:
for key, tensor in tensor_dict.items():
key_to_tensors[key].append(tensor)
batched_tensors = {}
for key, tensor_list in key_to_tensors.items():
batched_tensor = torch.stack(tensor_list)
if remove_trailing_dimension and all(tensor.size(-1) == 1 for tensor in tensor_list):
batched_tensor = batched_tensor.squeeze(-1)
batched_tensors[key] = batched_tensor
return batched_tensors
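# Illustrative usage sketch (not part of the original allennlp source): batch two
# instances' tensor dictionaries into a single dictionary of stacked tensors.
def _example_batch_tensor_dicts():
    instance_1 = {"tokens": torch.tensor([1, 2, 3])}
    instance_2 = {"tokens": torch.tensor([4, 5, 6])}
    batched = batch_tensor_dicts([instance_1, instance_2])
    # The matching "tokens" entries are stacked along a new batch dimension.
    assert batched["tokens"].tolist() == [[1, 2, 3], [4, 5, 6]]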
def get_lengths_from_binary_sequence_mask(mask: torch.BoolTensor) -> torch.LongTensor:
"""
Compute sequence lengths for each batch element in a tensor using a
binary mask.
# Parameters
mask : `torch.BoolTensor`, required.
A 2D binary mask of shape (batch_size, sequence_length) to
calculate the per-batch sequence lengths from.
# Returns
`torch.LongTensor`
A torch.LongTensor of shape (batch_size,) representing the lengths
of the sequences in the batch.
"""
return mask.sum(-1)
def get_mask_from_sequence_lengths(
sequence_lengths: torch.Tensor, max_length: int
) -> torch.BoolTensor:
"""
Given a variable of shape `(batch_size,)` that represents the sequence lengths of each batch
element, this function returns a `(batch_size, max_length)` mask variable. For example, if
our input was `[2, 2, 3]`, with a `max_length` of 4, we'd return
`[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]`.
We require `max_length` here instead of just computing it from the input `sequence_lengths`
because it lets us avoid finding the max, then copying that value from the GPU to the CPU so
that we can use it to construct a new tensor.
"""
# (batch_size, max_length)
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return sequence_lengths.unsqueeze(1) >= range_tensor
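# Illustrative usage sketch (not part of the original allennlp source): reproduces the
# docstring example above with concrete tensors.
def _example_get_mask_from_sequence_lengths():
    lengths = torch.tensor([2, 2, 3])
    mask = get_mask_from_sequence_lengths(lengths, max_length=4)
    # Each row is True for the first `length` positions and False afterwards.
    assert mask.tolist() == [
        [True, True, False, False],
        [True, True, False, False],
        [True, True, True, False],
    ]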
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
"""
Sort a batch first tensor by some specified lengths.
# Parameters
tensor : `torch.FloatTensor`, required.
A batch first Pytorch tensor.
sequence_lengths : `torch.LongTensor`, required.
A tensor representing the lengths of some dimension of the tensor which
we want to sort by.
# Returns
sorted_tensor : `torch.FloatTensor`
The original tensor sorted along the batch dimension with respect to sequence_lengths.
sorted_sequence_lengths : `torch.LongTensor`
The original sequence_lengths sorted by decreasing size.
restoration_indices : `torch.LongTensor`
Indices into the sorted_tensor such that
`sorted_tensor.index_select(0, restoration_indices) == original_tensor`
permutation_index : `torch.LongTensor`
The indices used to sort the tensor. This is useful if you want to sort many
tensors using the same ordering.
"""
if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.")
sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
sorted_tensor = tensor.index_select(0, permutation_index)
index_range = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device)
# This is the equivalent of zipping with index, sorting by the original
# sequence lengths and returning the now sorted indices.
_, reverse_mapping = permutation_index.sort(0, descending=False)
restoration_indices = index_range.index_select(0, reverse_mapping)
return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
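# Illustrative usage sketch (not part of the original allennlp source): sort a small
# batch by decreasing length and then restore the original order.
def _example_sort_batch_by_length():
    tensor = torch.tensor([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
    lengths = torch.tensor([2, 3, 1])
    sorted_tensor, sorted_lengths, restoration_indices, _ = sort_batch_by_length(tensor, lengths)
    # Rows are now ordered by decreasing length.
    assert sorted_lengths.tolist() == [3, 2, 1]
    # Applying the restoration indices recovers the original batch order.
    assert torch.equal(sorted_tensor.index_select(0, restoration_indices), tensor)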
def get_final_encoder_states(
encoder_outputs: torch.Tensor, mask: torch.BoolTensor, bidirectional: bool = False
) -> torch.Tensor:
"""
Given the output from a `Seq2SeqEncoder`, with shape `(batch_size, sequence_length,
encoding_dim)`, this method returns the final hidden state for each element of the batch,
giving a tensor of shape `(batch_size, encoding_dim)`. This is not as simple as
`encoder_outputs[:, -1]`, because the sequences could have different lengths. We use the
mask (which has shape `(batch_size, sequence_length)`) to find the final state for each batch
instance.
Additionally, if `bidirectional` is `True`, we will split the final dimension of the
`encoder_outputs` into two and assume that the first half is for the forward direction of the
encoder and the second half is for the backward direction. We will concatenate the last state
for each encoder dimension, giving `encoder_outputs[:, -1, :encoding_dim/2]` concatenated with
`encoder_outputs[:, 0, encoding_dim/2:]`.
"""
# These are the indices of the last words in the sequences (i.e. length sans padding - 1). We
# are assuming sequences are right padded.
# Shape: (batch_size,)
last_word_indices = mask.sum(1) - 1
batch_size, _, encoder_output_dim = encoder_outputs.size()
expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)
# Shape: (batch_size, 1, encoder_output_dim)
final_encoder_output = encoder_outputs.gather(1, expanded_indices)
final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)
if bidirectional:
final_forward_output = final_encoder_output[:, : (encoder_output_dim // 2)]
final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2) :]
final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)
return final_encoder_output
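# Illustrative usage sketch (not part of the original allennlp source): pick the last
# unmasked state for each sequence in a tiny batch.
def _example_get_final_encoder_states():
    # (batch_size=2, sequence_length=3, encoding_dim=2)
    encoder_outputs = torch.tensor(
        [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[7.0, 8.0], [9.0, 10.0], [0.0, 0.0]]]
    )
    mask = torch.tensor([[True, True, True], [True, True, False]])
    final_states = get_final_encoder_states(encoder_outputs, mask)
    # The first sequence ends at timestep 2, the second at timestep 1.
    assert final_states.tolist() == [[5.0, 6.0], [9.0, 10.0]]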
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor):
"""
Computes and returns an element-wise dropout mask for a given tensor, where
each element in the mask is dropped out with probability dropout_probability.
Note that the mask is NOT applied to the tensor - the tensor is passed to retain
the correct CUDA tensor type for the mask.
# Parameters
dropout_probability : `float`, required.
Probability of dropping a dimension of the input.
tensor_for_masking : `torch.Tensor`, required.
# Returns
`torch.FloatTensor`
A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).
This scaling ensures expected values and variances of the output of applying this mask
and the original tensor are the same.
"""
binary_mask = (torch.rand(tensor_for_masking.size()) > dropout_probability).to(
tensor_for_masking.device
)
# Scale mask by 1/keep_prob to preserve output statistics.
dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
return dropout_mask
def masked_softmax(
vector: torch.Tensor,
mask: torch.BoolTensor,
dim: int = -1,
memory_efficient: bool = False,
) -> torch.Tensor:
"""
`torch.nn.functional.softmax(vector)` does not work if some elements of `vector` should be
masked. This performs a softmax on just the non-masked portions of `vector`. Passing
`None` in for the mask is also acceptable; you'll just get a regular softmax.
`vector` can have an arbitrary number of dimensions; the only requirement is that `mask` is
broadcastable to `vector's` shape. If `mask` has fewer dimensions than `vector`, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
If `memory_efficient` is set to true, we will simply use a very large negative number for those
masked positions so that the probabilities of those positions are approximately 0.
This is not mathematically exact, but it works for most cases and consumes less memory.
In the case that the input vector is completely masked and `memory_efficient` is false, this function
returns an array of `0.0`. This behavior may cause `NaN` if this is used as the last layer of
a model that uses categorical cross-entropy loss. Instead, if `memory_efficient` is true, this function
will treat every element as equal and return a uniform distribution.
"""
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
# To limit numerical errors from large vector elements outside the mask, we zero these out.
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (
result.sum(dim=dim, keepdim=True) + tiny_value_of_dtype(result.dtype)
)
else:
masked_vector = vector.masked_fill(~mask, min_value_of_dtype(vector.dtype))
result = torch.nn.functional.softmax(masked_vector, dim=dim)
return result
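# Illustrative usage sketch (not part of the original allennlp source): softmax over only
# the unmasked entries of a vector.
def _example_masked_softmax():
    vector = torch.tensor([[1.0, 2.0, 3.0]])
    mask = torch.tensor([[True, True, False]])
    result = masked_softmax(vector, mask)
    # The masked position gets (approximately) zero probability and the remaining mass
    # is renormalized over the unmasked entries: 1/(1+e) and e/(1+e).
    assert torch.allclose(result, torch.tensor([[0.2689, 0.7311, 0.0]]), atol=1e-4)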
def masked_log_softmax(vector: torch.Tensor, mask: torch.BoolTensor, dim: int = -1) -> torch.Tensor:
"""
`torch.nn.functional.log_softmax(vector)` does not work if some elements of `vector` should be
masked. This performs a log_softmax on just the non-masked portions of `vector`. Passing
`None` in for the mask is also acceptable; you'll just get a regular log_softmax.
`vector` can have an arbitrary number of dimensions; the only requirement is that `mask` is
broadcastable to `vector's` shape. If `mask` has fewer dimensions than `vector`, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
In the case that the input vector is completely masked, the return value of this function is
arbitrary, but not `nan`. You should be masking the result of whatever computation comes out
of this in that case, anyway, so the specific values returned shouldn't matter. Also, the way
that we deal with this case relies on having single-precision floats; mixing half-precision
floats with fully-masked vectors will likely give you `nans`.
If your logits are all extremely negative (i.e., the max value in your logit vector is -50 or
lower), the way we handle masking here could mess you up. But if you've got logit values that
extreme, you've got bigger problems than this.
"""
if mask is not None:
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
# vector + mask.log() is an easy way to zero out masked elements in logspace, but it
# results in nans when the whole vector is masked. We need a very small value instead of a
# zero in the mask for these cases.
vector = vector + (mask + tiny_value_of_dtype(vector.dtype)).log()
return torch.nn.functional.log_softmax(vector, dim=dim)
def masked_max(
vector: torch.Tensor,
mask: torch.BoolTensor,
dim: int,
keepdim: bool = False,
) -> torch.Tensor:
"""
Calculates max along a certain dimension, ignoring masked values.
# Parameters
vector : `torch.Tensor`
The vector to calculate max over.
mask : `torch.BoolTensor`
The mask of the vector. It must be broadcastable with vector.
dim : `int`
The dimension to calculate max
keepdim : `bool`
Whether to keep dimension
# Returns
`torch.Tensor`
A `torch.Tensor` containing the maximum values.
"""
replaced_vector = vector.masked_fill(~mask, min_value_of_dtype(vector.dtype))
max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim)
return max_value
def masked_mean(
vector: torch.Tensor, mask: torch.BoolTensor, dim: int, keepdim: bool = False
) -> torch.Tensor:
"""
Calculates mean along a certain dimension, ignoring masked values.
# Parameters
vector : `torch.Tensor`
The vector to calculate mean.
mask : `torch.BoolTensor`
The mask of the vector. It must be broadcastable with vector.
dim : `int`
The dimension to calculate mean
keepdim : `bool`
Whether to keep dimension
# Returns
`torch.Tensor`
A `torch.Tensor` containing the mean values.
"""
replaced_vector = vector.masked_fill(~mask, 0.0)
value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim)
value_count = torch.sum(mask, dim=dim, keepdim=keepdim)
return value_sum / value_count.float().clamp(min=tiny_value_of_dtype(torch.float))
def masked_flip(padded_sequence: torch.Tensor, sequence_lengths: List[int]) -> torch.Tensor:
"""
Flips a padded tensor along the time dimension without affecting masked entries.
# Parameters
padded_sequence : `torch.Tensor`
The tensor to flip along the time dimension.
Assumed to be of dimensions (batch size, num timesteps, ...)
sequence_lengths : `List[int]`
A list containing the lengths of each unpadded sequence in the batch.
# Returns
`torch.Tensor`
A `torch.Tensor` of the same shape as padded_sequence.
"""
assert padded_sequence.size(0) == len(
sequence_lengths
), f"sequence_lengths length ${len(sequence_lengths)} does not match batch size ${padded_sequence.size(0)}"
num_timesteps = padded_sequence.size(1)
flipped_padded_sequence = torch.flip(padded_sequence, [1])
sequences = [
flipped_padded_sequence[i, num_timesteps - length :]
for i, length in enumerate(sequence_lengths)
]
return torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True)
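# Illustrative usage sketch (not part of the original allennlp source): flip each
# sequence in time without moving padding.
def _example_masked_flip():
    # (batch_size=2, num_timesteps=3, 1)
    padded = torch.tensor([[[1.0], [2.0], [3.0]], [[4.0], [5.0], [0.0]]])
    flipped = masked_flip(padded, [3, 2])
    # Each unpadded prefix is reversed; padding stays at the end.
    assert flipped.tolist() == [[[3.0], [2.0], [1.0]], [[5.0], [4.0], [0.0]]]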
def viterbi_decode(
tag_sequence: torch.Tensor,
transition_matrix: torch.Tensor,
tag_observations: Optional[List[int]] = None,
allowed_start_transitions: torch.Tensor = None,
allowed_end_transitions: torch.Tensor = None,
top_k: int = None,
):
"""
Perform Viterbi decoding in log space over a sequence given a transition matrix
specifying pairwise (transition) potentials between tags and a matrix of shape
(sequence_length, num_tags) specifying unary potentials for possible tags per
timestep.
# Parameters
tag_sequence : `torch.Tensor`, required.
A tensor of shape (sequence_length, num_tags) representing scores for
a set of tags over a given sequence.
transition_matrix : `torch.Tensor`, required.
A tensor of shape (num_tags, num_tags) representing the binary potentials
for transitioning between a given pair of tags.
tag_observations : `Optional[List[int]]`, optional, (default = `None`)
A list of length `sequence_length` containing the class ids of observed
elements in the sequence, with unobserved elements being set to -1. Note that
it is possible to provide evidence which results in degenerate labelings if
the sequences of tags you provide as evidence cannot transition between each
other, or those transitions are extremely unlikely. In this situation we log a
warning, but the responsibility for providing self-consistent evidence ultimately
lies with the user.
allowed_start_transitions : `torch.Tensor`, optional, (default = `None`)
An optional tensor of shape (num_tags,) describing which tags the START token
may transition *to*. If provided, additional transition constraints will be used for
determining the start element of the sequence.
allowed_end_transitions : `torch.Tensor`, optional, (default = `None`)
An optional tensor of shape (num_tags,) describing which tags may transition *to* the
end tag. If provided, additional transition constraints will be used for determining
the end element of the sequence.
top_k : `int`, optional, (default = `None`)
Optional integer specifying how many of the top paths to return. For top_k >= 1, returns
a tuple of two lists: top_k_paths, top_k_scores. For top_k == None, returns a flattened
tuple with just the top path and its score (not in lists, for backwards compatibility).
# Returns
viterbi_path : `List[int]`
The tag indices of the maximum likelihood tag sequence.
viterbi_score : `torch.Tensor`
The score of the viterbi path.
"""
if top_k is None:
top_k = 1
flatten_output = True
elif top_k >= 1:
flatten_output = False
else:
raise ValueError(f"top_k must be either None or an integer >=1. Instead received {top_k}")
sequence_length, num_tags = list(tag_sequence.size())
has_start_end_restrictions = (
allowed_end_transitions is not None or allowed_start_transitions is not None
)
if has_start_end_restrictions:
if allowed_end_transitions is None:
allowed_end_transitions = torch.zeros(num_tags)
if allowed_start_transitions is None:
allowed_start_transitions = torch.zeros(num_tags)
num_tags = num_tags + 2
new_transition_matrix = torch.zeros(num_tags, num_tags)
new_transition_matrix[:-2, :-2] = transition_matrix
# Start and end transitions are fully defined, but cannot transition between each other.
allowed_start_transitions = torch.cat(
[allowed_start_transitions, torch.tensor([-math.inf, -math.inf])]
)
allowed_end_transitions = torch.cat(
[allowed_end_transitions, torch.tensor([-math.inf, -math.inf])]
)
# First define how we may transition FROM the start and end tags.
new_transition_matrix[-2, :] = allowed_start_transitions
# We cannot transition from the end tag to any tag.
new_transition_matrix[-1, :] = -math.inf
new_transition_matrix[:, -1] = allowed_end_transitions
# We cannot transition to the start tag from any tag.
new_transition_matrix[:, -2] = -math.inf
transition_matrix = new_transition_matrix
if tag_observations:
if len(tag_observations) != sequence_length:
raise ConfigurationError(
"Observations were provided, but they were not the same length "
"as the sequence. Found sequence of length: {} and evidence: {}".format(
sequence_length, tag_observations
)
)
else:
tag_observations = [-1 for _ in range(sequence_length)]
if has_start_end_restrictions:
tag_observations = [num_tags - 2] + tag_observations + [num_tags - 1]
zero_sentinel = torch.zeros(1, num_tags)
extra_tags_sentinel = torch.ones(sequence_length, 2) * -math.inf
tag_sequence = torch.cat([tag_sequence, extra_tags_sentinel], -1)
tag_sequence = torch.cat([zero_sentinel, tag_sequence, zero_sentinel], 0)
sequence_length = tag_sequence.size(0)
path_scores = []
path_indices = []
if tag_observations[0] != -1:
one_hot = torch.zeros(num_tags)
one_hot[tag_observations[0]] = 100000.0
path_scores.append(one_hot.unsqueeze(0))
else:
path_scores.append(tag_sequence[0, :].unsqueeze(0))
# Evaluate the scores for all possible paths.
for timestep in range(1, sequence_length):
# Add pairwise potentials to current scores.
summed_potentials = path_scores[timestep - 1].unsqueeze(2) + transition_matrix
summed_potentials = summed_potentials.view(-1, num_tags)
# Best pairwise potential path score from the previous timestep.
max_k = min(summed_potentials.size()[0], top_k)
scores, paths = torch.topk(summed_potentials, k=max_k, dim=0)
# If we have an observation for this timestep, use it
# instead of the distribution over tags.
observation = tag_observations[timestep]
# Warn the user if they have passed
# invalid/extremely unlikely evidence.
if tag_observations[timestep - 1] != -1 and observation != -1:
if transition_matrix[tag_observations[timestep - 1], observation] < -10000:
logger.warning(
"The pairwise potential between tags you have passed as "
"observations is extremely unlikely. Double check your evidence "
"or transition potentials!"
)
if observation != -1:
one_hot = torch.zeros(num_tags)
one_hot[observation] = 100000.0
path_scores.append(one_hot.unsqueeze(0))
else:
path_scores.append(tag_sequence[timestep, :] + scores)
path_indices.append(paths.squeeze())
# Construct the most likely sequence backwards.
path_scores_v = path_scores[-1].view(-1)
max_k = min(path_scores_v.size()[0], top_k)
viterbi_scores, best_paths = torch.topk(path_scores_v, k=max_k, dim=0)
viterbi_paths = []
for i in range(max_k):
viterbi_path = [best_paths[i]]
for backward_timestep in reversed(path_indices):
viterbi_path.append(int(backward_timestep.view(-1)[viterbi_path[-1]]))
# Reverse the backward path.
viterbi_path.reverse()
if has_start_end_restrictions:
viterbi_path = viterbi_path[1:-1]
# The Viterbi search uses (num_tags * n_permutations) nodes, so we take each index modulo num_tags.
viterbi_path = [j % num_tags for j in viterbi_path]
viterbi_paths.append(viterbi_path)
if flatten_output:
return viterbi_paths[0], viterbi_scores[0]
return viterbi_paths, viterbi_scores
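# Illustrative usage sketch (not part of the original allennlp source): a two-tag,
# three-step decode where the transition matrix strongly discourages repeating a tag.
def _example_viterbi_decode():
    tag_sequence = torch.tensor([[5.0, 0.0], [0.0, 0.0], [5.0, 0.0]])
    transition_matrix = torch.tensor([[-10.0, 0.0], [0.0, -10.0]])
    path, score = viterbi_decode(tag_sequence, transition_matrix)
    # The unary potentials prefer tag 0 at steps 0 and 2, and the transition penalties
    # force an alternation in the middle, giving the path [0, 1, 0].
    assert [int(tag) for tag in path] == [0, 1, 0]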
def get_text_field_mask(
text_field_tensors: Dict[str, Dict[str, torch.Tensor]],
num_wrapping_dims: int = 0,
padding_id: int = 0,
) -> torch.BoolTensor:
"""
Takes the dictionary of tensors produced by a `TextField` and returns a mask
with 0 where the tokens are padding, and 1 otherwise. `padding_id` specifies the id of padding tokens.
We also handle `TextFields` wrapped by an arbitrary number of `ListFields`, where the number of wrapping
`ListFields` is given by `num_wrapping_dims`.
If `num_wrapping_dims == 0`, the returned mask has shape `(batch_size, num_tokens)`.
If `num_wrapping_dims > 0` then the returned mask has `num_wrapping_dims` extra
dimensions, so the shape will be `(batch_size, ..., num_tokens)`.
There could be several entries in the tensor dictionary with different shapes (e.g., one for
word ids, one for character ids). In order to get a token mask, we use the tensor in
the dictionary with the lowest number of dimensions. After subtracting `num_wrapping_dims`,
if this tensor has two dimensions we assume it has shape `(batch_size, ..., num_tokens)`,
and use it for the mask. If instead it has three dimensions, we assume it has shape
`(batch_size, ..., num_tokens, num_features)`, and sum over the last dimension to produce
the mask. Most frequently this will be a character id tensor, but it could also be a
featurized representation of each token, etc.
If the input `text_field_tensors` contains the "mask" key, this is returned instead of inferring the mask.
"""
masks = []
for indexer_name, indexer_tensors in text_field_tensors.items():
if "mask" in indexer_tensors:
masks.append(indexer_tensors["mask"].bool())
if len(masks) == 1:
return masks[0]
elif len(masks) > 1:
# TODO(mattg): My guess is this will basically never happen, so I'm not writing logic to
# handle it. Should be straightforward to handle, though. If you see this error in
# practice, open an issue on github.
raise ValueError("found two mask outputs; not sure which to use!")
tensor_dims = [
(tensor.dim(), tensor)
for indexer_output in text_field_tensors.values()
for tensor in indexer_output.values()
]
tensor_dims.sort(key=lambda x: x[0])
smallest_dim = tensor_dims[0][0] - num_wrapping_dims
if smallest_dim == 2:
token_tensor = tensor_dims[0][1]
return token_tensor != padding_id
elif smallest_dim == 3:
character_tensor = tensor_dims[0][1]
return (character_tensor != padding_id).any(dim=-1)
else:
raise ValueError("Expected a tensor with dimension 2 or 3, found {}".format(smallest_dim))
def get_token_ids_from_text_field_tensors(
text_field_tensors: Dict[str, Dict[str, torch.Tensor]],
) -> torch.Tensor:
"""
Our `TextFieldTensors` are complex output structures, because they try to handle a lot of
potential variation. Sometimes, you just want to grab the token ids from this data structure,
and that's not trivial without hard-coding assumptions about your data processing, which defeats
the entire purpose of that generality. This method tries to let you get the token ids out of the
data structure in your model without hard-coding any assumptions.
"""
for indexer_name, indexer_tensors in text_field_tensors.items():
for argument_name, tensor in indexer_tensors.items():
if argument_name in ["tokens", "token_ids", "input_ids"]:
return tensor
raise NotImplementedError(
"Our heuristic for guessing the right token ids failed. Please open an issue on "
"github with more detail on how you got this error, so we can implement more robust "
"logic in this method."
)
def weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:
"""
Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an
"attention" vector), and returns a weighted sum of the rows in the matrix. This is the typical
computation performed after an attention mechanism.
Note that while we call this a "matrix" of vectors and an attention "vector", we also handle
higher-order tensors. We always sum over the second-to-last dimension of the "matrix", and we
assume that all dimensions in the "matrix" prior to the last dimension are matched in the
"vector". Non-matched dimensions in the "vector" must be `directly after the batch dimension`.
For example, say I have a "matrix" with dimensions `(batch_size, num_queries, num_words,
embedding_dim)`. The attention "vector" then must have at least those dimensions, and could
have more. Both:
- `(batch_size, num_queries, num_words)` (distribution over words for each query)
- `(batch_size, num_documents, num_queries, num_words)` (distribution over words in a
query for each document)
are valid input "vectors", producing tensors of shape:
`(batch_size, num_queries, embedding_dim)` and
`(batch_size, num_documents, num_queries, embedding_dim)` respectively.
"""
# We'll special-case a few settings here, where there are efficient (but poorly-named)
# operations in pytorch that already do the computation we need.
if attention.dim() == 2 and matrix.dim() == 3:
return attention.unsqueeze(1).bmm(matrix).squeeze(1)
if attention.dim() == 3 and matrix.dim() == 3:
return attention.bmm(matrix)
if matrix.dim() - 1 < attention.dim():
expanded_size = list(matrix.size())
for i in range(attention.dim() - matrix.dim() + 1):
matrix = matrix.unsqueeze(1)
expanded_size.insert(i + 1, attention.size(i + 1))
matrix = matrix.expand(*expanded_size)
intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix
return intermediate.sum(dim=-2)
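# Illustrative usage sketch (not part of the original allennlp source): the standard
# attention case of a (batch_size, num_rows) weight vector over a
# (batch_size, num_rows, embedding_dim) matrix.
def _example_weighted_sum():
    matrix = torch.tensor([[[1.0, 0.0], [0.0, 1.0]]])
    attention = torch.tensor([[0.75, 0.25]])
    result = weighted_sum(matrix, attention)
    # 0.75 * [1, 0] + 0.25 * [0, 1]
    assert torch.allclose(result, torch.tensor([[0.75, 0.25]]))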
def sequence_cross_entropy_with_logits(
logits: torch.FloatTensor,
targets: torch.LongTensor,
weights: Union[torch.FloatTensor, torch.BoolTensor],
average: str = "batch",
label_smoothing: float = None,
gamma: float = None,
alpha: Union[float, List[float], torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Computes the cross entropy loss of a sequence, weighted with respect to
some user provided weights. Note that the weighting here is not the same as
in the `torch.nn.CrossEntropyLoss()` criterion, which is weighting
classes; here we are weighting the loss contribution from particular elements
in the sequence. This allows loss computations for models which use padding.
# Parameters
logits : `torch.FloatTensor`, required.
A `torch.FloatTensor` of size (batch_size, sequence_length, num_classes)
which contains the unnormalized probability for each class.
targets : `torch.LongTensor`, required.
A `torch.LongTensor` of size (batch, sequence_length) which contains the
index of the true class for each corresponding step.
weights : `Union[torch.FloatTensor, torch.BoolTensor]`, required.
A `torch.FloatTensor` of size (batch, sequence_length)
average: `str`, optional (default = `"batch"`)
If "batch", average the loss across the batches. If "token", average
the loss across each item in the input. If `None`, return a vector
of losses per batch element.
label_smoothing : `float`, optional (default = `None`)
Whether or not to apply label smoothing to the cross-entropy loss.
For example, with a label smoothing value of 0.2, a 4 class classification
target would look like `[0.05, 0.05, 0.85, 0.05]` if the 3rd class was
the correct label.
gamma : `float`, optional (default = `None`)
Focal loss[*] focusing parameter `gamma`, which reduces the relative loss for
well-classified examples and puts more focus on hard ones. The greater the value
of `gamma`, the more focus is placed on hard examples.
alpha : `Union[float, List[float]]`, optional (default = `None`)
Focal loss[*] weighting factor `alpha` to balance between classes. Can be
used independently of `gamma`. If a single `float` is provided, it
is assumed to be the binary case, using `alpha` and `1 - alpha` for the positive and
negative classes respectively. If a list of `float` with the same
length as the number of classes is provided, the weights will match the classes.
[*] T. Lin, P. Goyal, R. Girshick, K. He and P. Dollár, "Focal Loss for
Dense Object Detection," 2017 IEEE International Conference on Computer
Vision (ICCV), Venice, 2017, pp. 2999-3007.
# Returns
`torch.FloatTensor`
A torch.FloatTensor representing the cross entropy loss.
If `average=="batch"` or `average=="token"`, the returned loss is a scalar.
If `average is None`, the returned loss is a vector of shape (batch_size,).
"""
if average not in {None, "token", "batch"}:
raise ValueError("Got average f{average}, expected one of None, 'token', or 'batch'")
# make sure weights are float
weights = weights.to(logits.dtype)
# sum all dim except batch
non_batch_dims = tuple(range(1, len(weights.shape)))
# shape : (batch_size,)
weights_batch_sum = weights.sum(dim=non_batch_dims)
# shape : (batch * sequence_length, num_classes)
logits_flat = logits.view(-1, logits.size(-1))
# shape : (batch * sequence_length, num_classes)
log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
# shape : (batch * max_len, 1)
targets_flat = targets.view(-1, 1).long()
# focal loss coefficient
if gamma:
# shape : (batch * sequence_length, num_classes)
probs_flat = log_probs_flat.exp()
# shape : (batch * sequence_length,)
probs_flat = torch.gather(probs_flat, dim=1, index=targets_flat)
# shape : (batch * sequence_length,)
focal_factor = (1.0 - probs_flat) ** gamma
# shape : (batch, sequence_length)
focal_factor = focal_factor.view(*targets.size())
weights = weights * focal_factor
if alpha is not None:
# shape : () / (num_classes,)
if isinstance(alpha, (float, int)):
# shape : (2,)
alpha_factor = torch.tensor(
[1.0 - float(alpha), float(alpha)], dtype=weights.dtype, device=weights.device
)
elif isinstance(alpha, (list, numpy.ndarray, torch.Tensor)):
# shape : (c,)
alpha_factor = torch.tensor(alpha, dtype=weights.dtype, device=weights.device)
if not alpha_factor.size():
# shape : (1,)
alpha_factor = alpha_factor.view(1)
# shape : (2,)
alpha_factor = torch.cat([1 - alpha_factor, alpha_factor])
else:
raise TypeError(
("alpha must be float, list of float, or torch.FloatTensor, {} provided.").format(
type(alpha)
)
)
# shape : (batch, max_len)
alpha_factor = torch.gather(alpha_factor, dim=0, index=targets_flat.view(-1)).view(
*targets.size()
)
weights = weights * alpha_factor
if label_smoothing is not None and label_smoothing > 0.0:
num_classes = logits.size(-1)
smoothing_value = label_smoothing / num_classes
# Fill all the correct indices with 1 - smoothing value.
one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(
-1, targets_flat, 1.0 - label_smoothing
)
smoothed_targets = one_hot_targets + smoothing_value
negative_log_likelihood_flat = -log_probs_flat * smoothed_targets
negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)
else:
# Contribution to the negative log likelihood only comes from the exact indices
# of the targets, as the target distributions are one-hot. Here we use torch.gather
# to extract the indices of the num_classes dimension which contribute to the loss.
# shape : (batch * sequence_length, 1)
negative_log_likelihood_flat = -torch.gather(log_probs_flat, dim=1, index=targets_flat)
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood * weights
if average == "batch":
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(non_batch_dims) / (
weights_batch_sum + tiny_value_of_dtype(negative_log_likelihood.dtype)
)
num_non_empty_sequences = (weights_batch_sum > 0).sum() + tiny_value_of_dtype(
negative_log_likelihood.dtype
)
return per_batch_loss.sum() / num_non_empty_sequences
elif average == "token":
return negative_log_likelihood.sum() / (
weights_batch_sum.sum() + tiny_value_of_dtype(negative_log_likelihood.dtype)
)
else:
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(non_batch_dims) / (
weights_batch_sum + tiny_value_of_dtype(negative_log_likelihood.dtype)
)
return per_batch_loss
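# Illustrative usage sketch (not part of the original allennlp source): a batch of two
# sequences where the trailing positions of the second sequence are padding.
def _example_sequence_cross_entropy_with_logits():
    logits = torch.randn(2, 3, 4)  # (batch_size, sequence_length, num_classes)
    targets = torch.tensor([[0, 1, 2], [3, 0, 0]])
    weights = torch.tensor([[1, 1, 1], [1, 0, 0]]).float()
    loss = sequence_cross_entropy_with_logits(logits, targets, weights)
    # With the default average="batch", the result is a scalar; padded positions
    # (weight 0) contribute nothing to the loss.
    assert loss.dim() == 0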
def replace_masked_values(
tensor: torch.Tensor, mask: torch.BoolTensor, replace_with: float
) -> torch.Tensor:
"""
Replaces all masked values in `tensor` with `replace_with`. `mask` must be broadcastable
to the same shape as `tensor`. We require that `tensor.dim() == mask.dim()`, as otherwise we
won't know which dimensions of the mask to unsqueeze.
This just does `tensor.masked_fill()`, except the pytorch method fills in things with a mask
value of 1, where we want the opposite. You can do this in your own code with
`tensor.masked_fill(~mask, replace_with)`.
"""
if tensor.dim() != mask.dim():
raise ConfigurationError(
"tensor.dim() (%d) != mask.dim() (%d)" % (tensor.dim(), mask.dim())
)
return tensor.masked_fill(~mask, replace_with)
def tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:
"""
A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing `__eq__` methods
easier, in a way that's really only intended to be useful for tests.
"""
if isinstance(tensor1, (list, tuple)):
if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):
return False
return all(tensors_equal(t1, t2, tolerance) for t1, t2 in zip(tensor1, tensor2))
elif isinstance(tensor1, dict):
if not isinstance(tensor2, dict):
return False
if tensor1.keys() != tensor2.keys():
return False
return all(tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1)
elif isinstance(tensor1, torch.Tensor):
if not isinstance(tensor2, torch.Tensor):
return False
if tensor1.size() != tensor2.size():
return False
# Special case for bools since they don't support subtraction
if tensor1.dtype == torch.bool or tensor2.dtype == torch.bool:
return (tensor1 == tensor2).all()
return ((tensor1 - tensor2).abs().float() < tolerance).all()
else:
try:
return tensor1 == tensor2
except RuntimeError:
print(type(tensor1), type(tensor2))
raise
def device_mapping(cuda_device: int):
"""
In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
you have to supply a `map_location` function. Call this with
the desired `cuda_device` to get the function that `torch.load()` needs.
"""
def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:
if cuda_device >= 0:
return storage.cuda(cuda_device)
else:
return storage
return inner_device_mapping
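# Illustrative usage sketch (not part of the original allennlp source; "model.th" is a
# hypothetical checkpoint name): load GPU-trained weights onto the CPU.
#
#   state_dict = torch.load("model.th", map_location=device_mapping(-1))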
def combine_tensors(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
"""
Combines a list of tensors using element-wise operations and concatenation, specified by a
`combination` string. The string refers to (1-indexed) positions in the input tensor list,
and looks like `"1,2,1+2,3-1"`.
We allow the following kinds of combinations : `x`, `x*y`, `x+y`, `x-y`, and `x/y`,
where `x` and `y` are positive integers less than or equal to `len(tensors)`. Each of
the binary operations is performed elementwise. You can give as many combinations as you want
in the `combination` string. For example, for the input string `"1,2,1*2"`, the result
would be `[1;2;1*2]`, as you would expect, where `[;]` is concatenation along the last
dimension.
If you have a fixed, known way to combine tensors that you use in a model, you should probably
just use something like `torch.cat([x_tensor, y_tensor, x_tensor * y_tensor])`. This
function adds some complexity that is only necessary if you want the specific combination used
to be `configurable`.
If you want to do any element-wise operations, the tensors involved in each element-wise
operation must have the same shape.
This function also accepts `x` and `y` in place of `1` and `2` in the combination
string.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace("x", "1").replace("y", "2")
to_concatenate = [_get_combination(piece, tensors) for piece in combination.split(",")]
return torch.cat(to_concatenate, dim=-1)
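# Illustrative usage sketch (not part of the original allennlp source): the combination
# string "1,2,1*2" concatenates both inputs with their elementwise product.
def _example_combine_tensors():
    x = torch.tensor([[1.0, 2.0]])
    y = torch.tensor([[3.0, 4.0]])
    combined = combine_tensors("1,2,1*2", [x, y])
    # [x ; y ; x * y] along the last dimension.
    assert combined.tolist() == [[1.0, 2.0, 3.0, 4.0, 3.0, 8.0]]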
def _rindex(sequence: Sequence[T], obj: T) -> int:
"""
Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a
ValueError if there is no such item.
# Parameters
sequence : `Sequence[T]`
obj : `T`
# Returns
`int`
zero-based index associated to the position of the last item equal to obj
"""
for i in range(len(sequence) - 1, -1, -1):
if sequence[i] == obj:
return i
raise ValueError(f"Unable to find {obj} in sequence {sequence}.")
def _get_combination(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return tensors[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == "*":
return first_tensor * second_tensor
elif operation == "/":
return first_tensor / second_tensor
elif operation == "+":
return first_tensor + second_tensor
elif operation == "-":
return first_tensor - second_tensor
else:
raise ConfigurationError("Invalid operation: " + operation)
def combine_tensors_and_multiply(
combination: str, tensors: List[torch.Tensor], weights: torch.nn.Parameter
) -> torch.Tensor:
"""
Like [`combine_tensors`](./util.md#combine_tensors), but does a weighted (linear)
multiplication while combining. This is a separate function from `combine_tensors`
because we try to avoid instantiating large intermediate tensors during the combination,
which is possible because we know that we're going to be multiplying by a weight vector in the end.
# Parameters
combination : `str`
Same as in `combine_tensors`
tensors : `List[torch.Tensor]`
A list of tensors to combine, where the integers in the `combination` are (1-indexed)
positions in this list of tensors. These tensors are all expected to have either three or
four dimensions, with the final dimension being an embedding. If there are four
dimensions, one of them must have length 1.
weights : `torch.nn.Parameter`
A vector of weights to use for the combinations. This should have shape (combined_dim,),
as calculated by `get_combined_dim`.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace("x", "1").replace("y", "2")
pieces = combination.split(",")
tensor_dims = [tensor.size(-1) for tensor in tensors]
combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]
dims_so_far = 0
to_sum = []
for piece, combination_dim in zip(pieces, combination_dims):
weight = weights[dims_so_far : (dims_so_far + combination_dim)]
dims_so_far += combination_dim
to_sum.append(_get_combination_and_multiply(piece, tensors, weight))
result = to_sum[0]
for result_piece in to_sum[1:]:
result = result + result_piece
return result
def _get_combination_and_multiply(
combination: str, tensors: List[torch.Tensor], weight: torch.nn.Parameter
) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return torch.matmul(tensors[index], weight)
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == "*":
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
desired_dim = max(first_tensor.dim(), second_tensor.dim()) - 1
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
result = torch.matmul(intermediate, second_tensor.transpose(-1, -2))
if result.dim() == desired_dim + 1:
result = result.squeeze(-1)
return result
elif operation == "/":
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
desired_dim = max(first_tensor.dim(), second_tensor.dim()) - 1
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
result = torch.matmul(intermediate, second_tensor.pow(-1).transpose(-1, -2))
if result.dim() == desired_dim + 1:
result = result.squeeze(-1)
return result
elif operation == "+":
return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)
elif operation == "-":
return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)
else:
raise ConfigurationError("Invalid operation: " + operation)
def get_combined_dim(combination: str, tensor_dims: List[int]) -> int:
"""
For use with [`combine_tensors`](./util.md#combine_tensors).
This function computes the resultant dimension when calling `combine_tensors(combination, tensors)`,
when the tensor dimension is known. This is necessary for knowing the sizes of weight matrices
when building models that use `combine_tensors`.
# Parameters
combination : `str`
A comma-separated list of combination pieces, like `"1,2,1*2"`, specified identically to
`combination` in `combine_tensors`.
tensor_dims : `List[int]`
A list of tensor dimensions, where each dimension is from the `last axis` of the tensors
that will be input to `combine_tensors`.
"""
if len(tensor_dims) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace("x", "1").replace("y", "2")
return sum(_get_combination_dim(piece, tensor_dims) for piece in combination.split(","))
def _get_combination_dim(combination: str, tensor_dims: List[int]) -> int:
if combination.isdigit():
index = int(combination) - 1
return tensor_dims[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor_dim = _get_combination_dim(combination[0], tensor_dims)
second_tensor_dim = _get_combination_dim(combination[2], tensor_dims)
operation = combination[1]
if first_tensor_dim != second_tensor_dim:
raise ConfigurationError('Tensor dims must match for operation "{}"'.format(operation))
return first_tensor_dim
def logsumexp(tensor: torch.Tensor, dim: int = -1, keepdim: bool = False) -> torch.Tensor:
"""
A numerically stable computation of logsumexp. This is mathematically equivalent to
    `tensor.exp().sum(dim, keepdim=keepdim).log()`. This function is typically used for summing log
probabilities.
# Parameters
tensor : `torch.FloatTensor`, required.
A tensor of arbitrary size.
dim : `int`, optional (default = `-1`)
The dimension of the tensor to apply the logsumexp to.
keepdim: `bool`, optional (default = `False`)
Whether to retain a dimension of size one at the dimension we reduce over.
"""
max_score, _ = tensor.max(dim, keepdim=keepdim)
if keepdim:
stable_vec = tensor - max_score
else:
stable_vec = tensor - max_score.unsqueeze(dim)
return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
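# A minimal sketch of why `logsumexp` is preferred over the naive computation (illustrative
# values, kept in comments):
#
#     import torch
#     log_probs = torch.tensor([[1000.0, 1000.0]])
#     naive = log_probs.exp().sum(dim=-1).log()      # overflows to inf
#     stable = logsumexp(log_probs, dim=-1)          # ~1000.6931 == 1000 + log(2)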
def get_device_of(tensor: torch.Tensor) -> int:
"""
Returns the device of the tensor.
"""
if not tensor.is_cuda:
return -1
else:
return tensor.get_device()
def flatten_and_batch_shift_indices(indices: torch.Tensor, sequence_length: int) -> torch.Tensor:
"""
This is a subroutine for [`batched_index_select`](./util.md#batched_index_select).
The given `indices` of size `(batch_size, d_1, ..., d_n)` indexes into dimension 2 of a
target tensor, which has size `(batch_size, sequence_length, embedding_size)`. This
function returns a vector that correctly indexes into the flattened target. The sequence
length of the target must be provided to compute the appropriate offsets.
```python
indices = torch.ones([2,3], dtype=torch.long)
# Sequence length of the target tensor.
sequence_length = 10
shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)
# Indices into the second element in the batch are correctly shifted
# to take into account that the target tensor will be flattened before
# the indices are applied.
    assert shifted_indices.tolist() == [1, 1, 1, 11, 11, 11]
```
# Parameters
indices : `torch.LongTensor`, required.
sequence_length : `int`, required.
The length of the sequence the indices index into.
This must be the second dimension of the tensor.
# Returns
offset_indices : `torch.LongTensor`
"""
# Shape: (batch_size)
if torch.max(indices) >= sequence_length or torch.min(indices) < 0:
raise ConfigurationError(
f"All elements in indices should be in range (0, {sequence_length - 1})"
)
offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices
def batched_index_select(
target: torch.Tensor,
indices: torch.LongTensor,
flattened_indices: Optional[torch.LongTensor] = None,
) -> torch.Tensor:
"""
The given `indices` of size `(batch_size, d_1, ..., d_n)` indexes into the sequence
dimension (dimension 2) of the target, which has size `(batch_size, sequence_length,
embedding_size)`.
This function returns selected values in the target with respect to the provided indices, which
have size `(batch_size, d_1, ..., d_n, embedding_size)`. This can use the optionally
precomputed `flattened_indices` with size `(batch_size * d_1 * ... * d_n)` if given.
An example use case of this function is looking up the start and end indices of spans in a
sequence tensor. This is used in the
[CoreferenceResolver](https://docs.allennlp.org/models/master/models/coref/models/coref/)
model to select contextual word representations corresponding to the start and end indices of
mentions.
The key reason this can't be done with basic torch functions is that we want to be able to use look-up
tensors with an arbitrary number of dimensions (for example, in the coref model, we don't know
a-priori how many spans we are looking up).
# Parameters
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
indices : `torch.LongTensor`
A tensor of shape (batch_size, ...), where each element is an index into the
`sequence_length` dimension of the `target` tensor.
flattened_indices : `Optional[torch.Tensor]`, optional (default = `None`)
An optional tensor representing the result of calling `flatten_and_batch_shift_indices`
on `indices`. This is helpful in the case that the indices can be flattened once and
cached for many batch lookups.
# Returns
selected_targets : `torch.Tensor`
A tensor with shape [indices.size(), target.size(-1)] representing the embedded indices
extracted from the batch flattened target tensor.
"""
if flattened_indices is None:
# Shape: (batch_size * d_1 * ... * d_n)
flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
# Shape: (batch_size * d_1 * ... * d_n, embedding_size)
flattened_selected = flattened_target.index_select(0, flattened_indices)
selected_shape = list(indices.size()) + [target.size(-1)]
# Shape: (batch_size, d_1, ..., d_n, embedding_size)
selected_targets = flattened_selected.view(*selected_shape)
return selected_targets
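# A minimal usage sketch for `batched_index_select` (illustrative shapes, kept in comments),
# e.g. looking up two token positions per batch element:
#
#     import torch
#     target = torch.randn(2, 10, 4)                  # (batch, sequence, embedding)
#     indices = torch.tensor([[1, 3], [2, 5]])        # (batch, 2)
#     selected = batched_index_select(target, indices)
#     assert selected.shape == (2, 2, 4)
#     assert torch.equal(selected[0, 0], target[0, 1])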
def masked_index_fill(
target: torch.Tensor, indices: torch.LongTensor, mask: torch.BoolTensor, fill_value: int = 1
) -> torch.Tensor:
"""
    The given `indices` in `target` will be filled with `fill_value`, given a `mask`.
# Parameters
target : `torch.Tensor`, required.
A 2 dimensional tensor of shape (batch_size, sequence_length).
This is the tensor to be filled.
indices : `torch.LongTensor`, required
A 2 dimensional tensor of shape (batch_size, num_indices),
These are the indices that will be filled in the original tensor.
mask : `torch.Tensor`, required.
A 2 dimensional tensor of shape (batch_size, num_indices), mask.sum() == `nonzero_indices`.
fill_value : `int`, optional (default = `1`)
The value we fill the tensor with.
# Returns
filled_target : `torch.Tensor`
A tensor with shape (batch_size, sequence_length) where 'indices' are filled with `fill_value`
"""
mask = mask.bool()
prev_shape = target.size()
# Shape: (batch_size * num_indices)
flattened_indices = flatten_and_batch_shift_indices(indices * mask, target.size(1))
    # Shape: (batch_size * num_indices,)
mask = mask.view(-1)
# Shape: (batch_size * sequence_length, 1)
flattened_target = target.view(-1, 1)
# Shape: (nonzero_indices, 1)
unmasked_indices = flattened_indices[mask].unsqueeze(-1)
flattened_target = flattened_target.scatter(0, unmasked_indices, fill_value)
filled_target = flattened_target.reshape(prev_shape)
return filled_target
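# A minimal usage sketch for `masked_index_fill` (illustrative values, kept in comments);
# note how the masked-out index in the second row is ignored:
#
#     import torch
#     target = torch.zeros(2, 5, dtype=torch.long)
#     indices = torch.tensor([[1, 3], [2, 0]])
#     mask = torch.tensor([[True, True], [True, False]])
#     filled = masked_index_fill(target, indices, mask)
#     # filled -> [[0, 1, 0, 1, 0],
#     #            [0, 0, 1, 0, 0]]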
def masked_index_replace(
target: torch.Tensor,
indices: torch.LongTensor,
mask: torch.BoolTensor,
replace: torch.Tensor,
) -> torch.Tensor:
"""
    The given `indices` in `target` will be replaced with the corresponding vectors
    from the `replace` tensor, given a `mask`.
# Parameters
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_dim).
This is the tensor to be replaced into.
indices : `torch.LongTensor`, required
A 2 dimensional tensor of shape (batch_size, num_indices),
These are the indices that will be replaced in the original tensor.
mask : `torch.Tensor`, required.
A 2 dimensional tensor of shape (batch_size, num_indices), mask.sum() == `nonzero_indices`.
replace : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, num_indices, embedding_dim),
The tensor to perform scatter from.
# Returns
replaced_target : `torch.Tensor`
A tensor with shape (batch_size, sequence_length, embedding_dim) where 'indices'
        are replaced with the corresponding vector from `replace`.
"""
target = target.clone()
mask = mask.bool()
prev_shape = target.size()
# Shape: (batch_size * num_indices)
flattened_indices = flatten_and_batch_shift_indices(indices * mask, target.size(1))
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
    # Shape: (batch_size * num_indices,)
mask = mask.view(-1)
flattened_target[flattened_indices[mask]] = replace.view(-1, replace.size(-1))[mask]
# Shape: (batch_size, sequence_length, embedding_dim)
replaced_target = flattened_target.reshape(prev_shape)
return replaced_target
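# A minimal usage sketch for `masked_index_replace` (illustrative shapes, kept in comments):
#
#     import torch
#     target = torch.zeros(1, 4, 2)
#     indices = torch.tensor([[1, 3]])
#     mask = torch.tensor([[True, True]])
#     replace = torch.ones(1, 2, 2)
#     out = masked_index_replace(target, indices, mask, replace)
#     # rows 1 and 3 of out[0] are now all ones; rows 0 and 2 remain zero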
def batched_span_select(target: torch.Tensor, spans: torch.LongTensor) -> Tuple[torch.Tensor, torch.BoolTensor]:
"""
The given `spans` of size `(batch_size, num_spans, 2)` indexes into the sequence
dimension (dimension 2) of the target, which has size `(batch_size, sequence_length,
embedding_size)`.
This function returns segmented spans in the target with respect to the provided span indices.
# Parameters
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
    spans : `torch.LongTensor`
A 3 dimensional tensor of shape (batch_size, num_spans, 2) representing start and end
indices (both inclusive) into the `sequence_length` dimension of the `target` tensor.
# Returns
span_embeddings : `torch.Tensor`
        A tensor with shape (batch_size, num_spans, max_batch_span_width, embedding_size)
representing the embedded spans extracted from the batch flattened target tensor.
span_mask: `torch.BoolTensor`
A tensor with shape (batch_size, num_spans, max_batch_span_width) representing the mask on
the returned span embeddings.
"""
# both of shape (batch_size, num_spans, 1)
span_starts, span_ends = spans.split(1, dim=-1)
# shape (batch_size, num_spans, 1)
# These span widths are off by 1, because the span ends are `inclusive`.
span_widths = span_ends - span_starts
# We need to know the maximum span width so we can
# generate indices to extract the spans from the sequence tensor.
# These indices will then get masked below, such that if the length
# of a given span is smaller than the max, the rest of the values
# are masked.
max_batch_span_width = span_widths.max().item() + 1
# Shape: (1, 1, max_batch_span_width)
max_span_range_indices = get_range_vector(max_batch_span_width, get_device_of(target)).view(
1, 1, -1
)
# Shape: (batch_size, num_spans, max_batch_span_width)
# This is a broadcasted comparison - for each span we are considering,
# we are creating a range vector of size max_span_width, but masking values
# which are greater than the actual length of the span.
#
# We're using <= here (and for the mask below) because the span ends are
# inclusive, so we want to include indices which are equal to span_widths rather
# than using it as a non-inclusive upper bound.
span_mask = max_span_range_indices <= span_widths
raw_span_indices = span_starts + max_span_range_indices
    # We also don't want to include span indices which are greater than the sequence_length,
# which happens because some spans near the end of the sequence
# have a start index + max_batch_span_width > sequence_length, so we add this to the mask here.
span_mask = span_mask & (raw_span_indices < target.size(1)) & (0 <= raw_span_indices)
span_indices = raw_span_indices * span_mask
# Shape: (batch_size, num_spans, max_batch_span_width, embedding_dim)
span_embeddings = batched_index_select(target, span_indices)
return span_embeddings, span_mask
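# A minimal usage sketch for `batched_span_select` (illustrative shapes, kept in comments):
#
#     import torch
#     target = torch.randn(1, 6, 3)                    # (batch, sequence, embedding)
#     spans = torch.tensor([[[0, 2], [3, 3]]])         # two spans: tokens 0-2 and token 3
#     span_embeddings, span_mask = batched_span_select(target, spans)
#     assert span_embeddings.shape == (1, 2, 3, 3)     # max span width in this batch is 3
#     # span_mask[0, 1] == [True, False, False] because the second span has width 1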
def flattened_index_select(target: torch.Tensor, indices: torch.LongTensor) -> torch.Tensor:
"""
The given `indices` of size `(set_size, subset_size)` specifies subsets of the `target`
that each of the set_size rows should select. The `target` has size
`(batch_size, sequence_length, embedding_size)`, and the resulting selected tensor has size
`(batch_size, set_size, subset_size, embedding_size)`.
# Parameters
target : `torch.Tensor`, required.
A Tensor of shape (batch_size, sequence_length, embedding_size).
indices : `torch.LongTensor`, required.
A LongTensor of shape (set_size, subset_size). All indices must be < sequence_length
as this tensor is an index into the sequence_length dimension of the target.
# Returns
selected : `torch.Tensor`, required.
A Tensor of shape (batch_size, set_size, subset_size, embedding_size).
"""
if indices.dim() != 2:
raise ConfigurationError(
"Indices passed to flattened_index_select had shape {} but "
"only 2 dimensional inputs are supported.".format(indices.size())
)
# Shape: (batch_size, set_size * subset_size, embedding_size)
flattened_selected = target.index_select(1, indices.view(-1))
# Shape: (batch_size, set_size, subset_size, embedding_size)
selected = flattened_selected.view(target.size(0), indices.size(0), indices.size(1), -1)
return selected
def get_range_vector(size: int, device: int) -> torch.Tensor:
"""
Returns a range vector with the desired size, starting at 0. The CUDA implementation
    is meant to avoid copying data from CPU to GPU.
"""
if device > -1:
return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
else:
return torch.arange(0, size, dtype=torch.long)
def bucket_values(
distances: torch.Tensor, num_identity_buckets: int = 4, num_total_buckets: int = 10
) -> torch.Tensor:
"""
    Places the given values (designed for distances) into `num_total_buckets` semi-logscale
buckets, with `num_identity_buckets` of these capturing single values.
The default settings will bucket values into the following buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
# Parameters
distances : `torch.Tensor`, required.
A Tensor of any size, to be bucketed.
num_identity_buckets: `int`, optional (default = `4`).
The number of identity buckets (those only holding a single value).
num_total_buckets : `int`, (default = `10`)
The total number of buckets to bucket values into.
# Returns
`torch.Tensor`
A tensor of the same shape as the input, containing the indices of the buckets
the values were placed in.
"""
# Chunk the values into semi-logscale buckets using .floor().
# This is a semi-logscale bucketing because we divide by log(2) after taking the log.
# We do this to make the buckets more granular in the initial range, where we expect
# most values to fall. We then add (num_identity_buckets - 1) because we want these indices
# to start _after_ the fixed number of buckets which we specified would only hold single values.
logspace_index = (distances.float().log() / math.log(2)).floor().long() + (
num_identity_buckets - 1
)
# create a mask for values which will go into single number buckets (i.e not a range).
use_identity_mask = (distances <= num_identity_buckets).long()
use_buckets_mask = 1 + (-1 * use_identity_mask)
# Use the original values if they are less than num_identity_buckets, otherwise
# use the logspace indices.
combined_index = use_identity_mask * distances + use_buckets_mask * logspace_index
# Clamp to put anything > num_total_buckets into the final bucket.
return combined_index.clamp(0, num_total_buckets - 1)
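# A minimal usage sketch for `bucket_values` with the default bucket settings (illustrative
# values, kept in comments):
#
#     import torch
#     distances = torch.tensor([1, 2, 5, 8, 40, 200])
#     buckets = bucket_values(distances)
#     # buckets -> tensor([1, 2, 5, 6, 8, 9]); 200 is clamped into the final 64+ bucket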
def add_sentence_boundary_token_ids(
tensor: torch.Tensor, mask: torch.BoolTensor, sentence_begin_token: Any, sentence_end_token: Any
) -> Tuple[torch.Tensor, torch.BoolTensor]:
"""
Add begin/end of sentence tokens to the batch of sentences.
Given a batch of sentences with size `(batch_size, timesteps)` or
`(batch_size, timesteps, dim)` this returns a tensor of shape
`(batch_size, timesteps + 2)` or `(batch_size, timesteps + 2, dim)` respectively.
Returns both the new tensor and updated mask.
# Parameters
tensor : `torch.Tensor`
A tensor of shape `(batch_size, timesteps)` or `(batch_size, timesteps, dim)`
mask : `torch.BoolTensor`
A tensor of shape `(batch_size, timesteps)`
sentence_begin_token: `Any`
Can be anything that can be broadcast in torch for assignment.
For 2D input, a scalar with the `<S>` id. For 3D input, a tensor with length dim.
sentence_end_token: `Any`
Can be anything that can be broadcast in torch for assignment.
For 2D input, a scalar with the `</S>` id. For 3D input, a tensor with length dim.
# Returns
tensor_with_boundary_tokens : `torch.Tensor`
The tensor with the appended and prepended boundary tokens. If the input was 2D,
it has shape (batch_size, timesteps + 2) and if the input was 3D, it has shape
(batch_size, timesteps + 2, dim).
new_mask : `torch.BoolTensor`
The new mask for the tensor, taking into account the appended tokens
marking the beginning and end of the sentence.
"""
sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
tensor_shape = list(tensor.data.shape)
new_shape = list(tensor_shape)
new_shape[1] = tensor_shape[1] + 2
tensor_with_boundary_tokens = tensor.new_zeros(*new_shape, device=tensor.device)
if len(tensor_shape) == 2:
tensor_with_boundary_tokens[:, 1:-1] = tensor
tensor_with_boundary_tokens[:, 0] = sentence_begin_token
for i, j in enumerate(sequence_lengths):
tensor_with_boundary_tokens[i, j + 1] = sentence_end_token
new_mask = tensor_with_boundary_tokens != 0
elif len(tensor_shape) == 3:
tensor_with_boundary_tokens[:, 1:-1, :] = tensor
sentence_begin_token = sentence_begin_token.detach().to(tensor.device)
sentence_end_token = sentence_end_token.detach().to(tensor.device)
for i, j in enumerate(sequence_lengths):
tensor_with_boundary_tokens[i, 0, :] = sentence_begin_token
tensor_with_boundary_tokens[i, j + 1, :] = sentence_end_token
new_mask = (tensor_with_boundary_tokens > 0).sum(dim=-1) > 0
else:
raise ValueError("add_sentence_boundary_token_ids only accepts 2D and 3D input")
return tensor_with_boundary_tokens, new_mask
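# A minimal usage sketch for the 2D case of `add_sentence_boundary_token_ids` (illustrative
# token ids, kept in comments); here 1 and 2 stand in for the <S> and </S> ids:
#
#     import torch
#     tensor = torch.tensor([[5, 6, 0], [7, 0, 0]])
#     mask = tensor != 0
#     new_tensor, new_mask = add_sentence_boundary_token_ids(tensor, mask, 1, 2)
#     # new_tensor -> [[1, 5, 6, 2, 0],
#     #                [1, 7, 2, 0, 0]]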
def remove_sentence_boundaries(
tensor: torch.Tensor, mask: torch.BoolTensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Remove begin/end of sentence embeddings from the batch of sentences.
Given a batch of sentences with size `(batch_size, timesteps, dim)`
this returns a tensor of shape `(batch_size, timesteps - 2, dim)` after removing
the beginning and end sentence markers. The sentences are assumed to be padded on the right,
with the beginning of each sentence assumed to occur at index 0 (i.e., `mask[:, 0]` is assumed
to be 1).
Returns both the new tensor and updated mask.
This function is the inverse of `add_sentence_boundary_token_ids`.
# Parameters
tensor : `torch.Tensor`
A tensor of shape `(batch_size, timesteps, dim)`
mask : `torch.BoolTensor`
A tensor of shape `(batch_size, timesteps)`
# Returns
tensor_without_boundary_tokens : `torch.Tensor`
The tensor after removing the boundary tokens of shape `(batch_size, timesteps - 2, dim)`
new_mask : `torch.BoolTensor`
The new mask for the tensor of shape `(batch_size, timesteps - 2)`.
"""
sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
tensor_shape = list(tensor.data.shape)
new_shape = list(tensor_shape)
new_shape[1] = tensor_shape[1] - 2
tensor_without_boundary_tokens = tensor.new_zeros(*new_shape)
new_mask = tensor.new_zeros((new_shape[0], new_shape[1]), dtype=torch.bool)
for i, j in enumerate(sequence_lengths):
if j > 2:
tensor_without_boundary_tokens[i, : (j - 2), :] = tensor[i, 1 : (j - 1), :]
new_mask[i, : (j - 2)] = True
return tensor_without_boundary_tokens, new_mask
def add_positional_features(
tensor: torch.Tensor, min_timescale: float = 1.0, max_timescale: float = 1.0e4
):
"""
Implements the frequency-based positional encoding described
in [Attention is All you Need][0].
Adds sinusoids of different frequencies to a `Tensor`. A sinusoid of a
different frequency and phase is added to each dimension of the input `Tensor`.
This allows the attention heads to use absolute and relative positions.
The number of timescales is equal to hidden_dim / 2 within the range
(min_timescale, max_timescale). For each timescale, the two sinusoidal
signals sin(timestep / timescale) and cos(timestep / timescale) are
generated and concatenated along the hidden_dim dimension.
[0]: https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077
# Parameters
tensor : `torch.Tensor`
a Tensor with shape (batch_size, timesteps, hidden_dim).
min_timescale : `float`, optional (default = `1.0`)
The smallest timescale to use.
max_timescale : `float`, optional (default = `1.0e4`)
The largest timescale to use.
# Returns
`torch.Tensor`
The input tensor augmented with the sinusoidal frequencies.
""" # noqa
_, timesteps, hidden_dim = tensor.size()
timestep_range = get_range_vector(timesteps, get_device_of(tensor)).data.float()
# We're generating both cos and sin frequencies,
# so half for each.
num_timescales = hidden_dim // 2
timescale_range = get_range_vector(num_timescales, get_device_of(tensor)).data.float()
log_timescale_increments = math.log(float(max_timescale) / float(min_timescale)) / float(
num_timescales - 1
)
inverse_timescales = min_timescale * torch.exp(timescale_range * -log_timescale_increments)
# Broadcasted multiplication - shape (timesteps, num_timescales)
scaled_time = timestep_range.unsqueeze(1) * inverse_timescales.unsqueeze(0)
# shape (timesteps, 2 * num_timescales)
sinusoids = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)
if hidden_dim % 2 != 0:
# if the number of dimensions is odd, the cos and sin
# timescales had size (hidden_dim - 1) / 2, so we need
# to add a row of zeros to make up the difference.
sinusoids = torch.cat([sinusoids, sinusoids.new_zeros(timesteps, 1)], 1)
return tensor + sinusoids.unsqueeze(0)
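# A minimal usage sketch for `add_positional_features` (illustrative shapes, kept in comments):
#
#     import torch
#     x = torch.zeros(1, 5, 8)                 # (batch, timesteps, hidden_dim)
#     y = add_positional_features(x)
#     assert y.shape == (1, 5, 8)
#     # y[0, 0] == [0, 0, 0, 0, 1, 1, 1, 1]: sin(0) for the first half, cos(0) for the second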
def clone(module: torch.nn.Module, num_copies: int) -> torch.nn.ModuleList:
"""Produce N identical layers."""
return torch.nn.ModuleList(copy.deepcopy(module) for _ in range(num_copies))
def combine_initial_dims(tensor: torch.Tensor) -> torch.Tensor:
"""
Given a (possibly higher order) tensor of ids with shape
(d1, ..., dn, sequence_length)
Return a view that's (d1 * ... * dn, sequence_length).
If original tensor is 1-d or 2-d, return it as is.
"""
if tensor.dim() <= 2:
return tensor
else:
return tensor.view(-1, tensor.size(-1))
def uncombine_initial_dims(tensor: torch.Tensor, original_size: torch.Size) -> torch.Tensor:
"""
Given a tensor of embeddings with shape
(d1 * ... * dn, sequence_length, embedding_dim)
and the original shape
(d1, ..., dn, sequence_length),
return the reshaped tensor of embeddings with shape
(d1, ..., dn, sequence_length, embedding_dim).
If original size is 1-d or 2-d, return it as is.
"""
if len(original_size) <= 2:
return tensor
else:
view_args = list(original_size) + [tensor.size(-1)]
return tensor.view(*view_args)
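# A minimal sketch of how `combine_initial_dims` and `uncombine_initial_dims` form a round
# trip around an embedding lookup (illustrative shapes, kept in comments):
#
#     import torch
#     ids = torch.zeros(2, 3, 7, dtype=torch.long)              # (d1, d2, sequence_length)
#     flat_ids = combine_initial_dims(ids)                      # (6, 7)
#     embedded = torch.randn(6, 7, 16)                          # stand-in for an embedder output
#     restored = uncombine_initial_dims(embedded, ids.size())   # (2, 3, 7, 16)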
def inspect_parameters(module: torch.nn.Module, quiet: bool = False) -> Dict[str, Any]:
"""
Inspects the model/module parameters and their tunability. The output is structured
in a nested dict so that parameters in same sub-modules are grouped together.
This can be helpful to setup module path based regex, for example in initializer.
    It prints the dict by default (pass `quiet=True` to disable printing) and returns it. Example output:
{
"_text_field_embedder": {
"token_embedder_tokens": {
"_projection": {
"bias": "tunable",
"weight": "tunable"
},
"weight": "frozen"
}
}
}
"""
results: Dict[str, Any] = {}
for name, param in sorted(module.named_parameters()):
keys = name.split(".")
write_to = results
for key in keys[:-1]:
if key not in write_to:
write_to[key] = {}
write_to = write_to[key]
write_to[keys[-1]] = "tunable" if param.requires_grad else "frozen"
if not quiet:
print(json.dumps(results, indent=4))
return results
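# A minimal usage sketch for `inspect_parameters` (illustrative module, kept in comments):
#
#     import torch
#     linear = torch.nn.Linear(3, 4)
#     linear.bias.requires_grad = False
#     inspect_parameters(linear, quiet=True)
#     # -> {"bias": "frozen", "weight": "tunable"}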
def find_text_field_embedder(model: torch.nn.Module) -> torch.nn.Module:
"""
Takes a `Model` and returns the `Module` that is a `TextFieldEmbedder`. We return just the
first one, as it's very rare to have more than one. If there isn't a `TextFieldEmbedder` in the
given `Model`, we raise a `ValueError`.
"""
from allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder
for module in model.modules():
if isinstance(module, TextFieldEmbedder):
return module
raise ValueError("Couldn't find TextFieldEmbedder!")
def find_embedding_layer(model: torch.nn.Module) -> torch.nn.Module:
"""
Takes a model (typically an AllenNLP `Model`, but this works for any `torch.nn.Module`) and
makes a best guess about which module is the embedding layer. For typical AllenNLP models,
this often is the `TextFieldEmbedder`, but if you're using a pre-trained contextualizer, we
really want layer 0 of that contextualizer, not the output. So there are a bunch of hacks in
here for specific pre-trained contextualizers.
"""
# We'll look for a few special cases in a first pass, then fall back to just finding a
# TextFieldEmbedder in a second pass if we didn't find a special case.
from transformers.models.gpt2.modeling_gpt2 import GPT2Model
from transformers.models.bert.modeling_bert import BertEmbeddings
from transformers.models.albert.modeling_albert import AlbertEmbeddings
from transformers.models.roberta.modeling_roberta import RobertaEmbeddings
from allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder
from allennlp.modules.text_field_embedders.basic_text_field_embedder import (
BasicTextFieldEmbedder,
)
from allennlp.modules.token_embedders.embedding import Embedding
for module in model.modules():
if isinstance(module, BertEmbeddings):
return module.word_embeddings
if isinstance(module, RobertaEmbeddings):
return module.word_embeddings
if isinstance(module, AlbertEmbeddings):
return module.word_embeddings
if isinstance(module, GPT2Model):
return module.wte
for module in model.modules():
if isinstance(module, TextFieldEmbedder):
if isinstance(module, BasicTextFieldEmbedder):
# We'll have a check for single Embedding cases, because we can be more efficient
# in cases like this. If this check fails, then for something like hotflip we need
# to actually run the text field embedder and construct a vector for each token.
if len(module._token_embedders) == 1:
embedder = list(module._token_embedders.values())[0]
if isinstance(embedder, Embedding):
if embedder._projection is None:
# If there's a projection inside the Embedding, then we need to return
# the whole TextFieldEmbedder, because there's more computation that
# needs to be done than just multiply by an embedding matrix.
return embedder
return module
raise RuntimeError("No embedding module found!")
def get_token_offsets_from_text_field_inputs(
text_field_inputs: List[Any],
) -> Optional[torch.Tensor]:
"""
Given a list of inputs to a TextFieldEmbedder, tries to find token offsets from those inputs, if
there are any. You will have token offsets if you are using a mismatched token embedder; if
you're not, the return value from this function should be None. This function is intended to be
called from a `forward_hook` attached to a `TextFieldEmbedder`, so the inputs are formatted just
as a list.
It's possible in theory that you could have multiple offsets as inputs to a single call to a
`TextFieldEmbedder`, but that's an extremely rare use case (I can't really imagine anyone
wanting to do that). In that case, we'll only return the first one. If you need different
behavior for your model, open an issue on github describing what you're doing.
"""
for input_index, text_field_input in enumerate(text_field_inputs):
if not isinstance(text_field_input, dict):
continue
for input_value in text_field_input.values():
if not isinstance(input_value, dict):
continue
for embedder_arg_name, embedder_arg_value in input_value.items():
if embedder_arg_name == "offsets":
return embedder_arg_value
return None
def extend_layer(layer: torch.nn.Module, new_dim: int) -> None:
valid_layers = [torch.nn.Linear, torch.nn.Bilinear]
if not any([isinstance(layer, i) for i in valid_layers]):
raise ConfigurationError("Inappropriate layer type")
extend_dim = new_dim - layer.out_features
if not extend_dim:
        return
if isinstance(layer, torch.nn.Linear):
new_weight = torch.FloatTensor(extend_dim, layer.in_features)
elif isinstance(layer, torch.nn.Bilinear):
new_weight = torch.FloatTensor(extend_dim, layer.in1_features, layer.in2_features)
new_bias = torch.FloatTensor(extend_dim)
torch.nn.init.xavier_uniform_(new_weight)
torch.nn.init.zeros_(new_bias)
device = layer.weight.device
layer.weight = torch.nn.Parameter(
torch.cat([layer.weight.data, new_weight.to(device)], dim=0),
requires_grad=layer.weight.requires_grad,
)
layer.bias = torch.nn.Parameter(
torch.cat([layer.bias.data, new_bias.to(device)], dim=0),
requires_grad=layer.bias.requires_grad,
)
layer.out_features = new_dim
def masked_topk(
input_: torch.FloatTensor,
mask: torch.BoolTensor,
k: Union[int, torch.LongTensor],
dim: int = -1,
) -> Tuple[torch.FloatTensor, torch.BoolTensor, torch.LongTensor]:
"""
Extracts the top-k items along a certain dimension. This is similar to `torch.topk` except:
    (1) we allow a `mask` that makes the function not consider certain elements;
    (2) the returned top input, mask, and indices are sorted in their original order in the input;
    (3) you may use the same `k` for all dimensions, or a different `k` for each.
# Parameters
input_ : `torch.FloatTensor`, required.
A tensor containing the items that we want to prune.
mask : `torch.BoolTensor`, required.
A tensor with the same shape as `input_` that makes the function not consider masked out
(i.e. False) elements.
k : `Union[int, torch.LongTensor]`, required.
        If a tensor, it must have the same shape as `input_` with dimension `dim` removed, and it
        specifies the number of items to keep for each slice along `dim`.
        If an int, the same number of items is kept for all slices.
# Returns
top_input : `torch.FloatTensor`
The values of the top-k scoring items.
Has the same shape as `input_` except dimension `dim` has value `k` when it's an `int`
or `k.max()` when it's a tensor.
    top_mask : `torch.BoolTensor`
        The corresponding mask for `top_input`.
        Has the same shape as `top_input`.
    top_indices : `torch.LongTensor`
        The indices of the top-k scoring items into the original `input_`
        tensor. This is returned because it can be useful to retain pointers to
        the original items, if each item is being scored by multiple distinct
        scorers, for instance.
        Has the same shape as `top_input`.
"""
if input_.size() != mask.size():
raise ValueError("`input_` and `mask` must have the same shape.")
if not -input_.dim() <= dim < input_.dim():
raise ValueError("`dim` must be in `[-input_.dim(), input_.dim())`")
dim = (dim + input_.dim()) % input_.dim()
max_k = k if isinstance(k, int) else k.max()
# We put the dim in question to the last dimension by permutation, and squash all leading dims.
# [0, 1, ..., dim - 1, dim + 1, ..., input.dim() - 1, dim]
permutation = list(range(input_.dim()))
permutation.pop(dim)
permutation += [dim]
# [0, 1, ..., dim - 1, -1, dim, ..., input.dim() - 2]; for restoration
reverse_permutation = list(range(input_.dim() - 1))
reverse_permutation.insert(dim, -1)
other_dims_size = list(input_.size())
other_dims_size.pop(dim)
permuted_size = other_dims_size + [max_k] # for restoration
# If an int was given for number of items to keep, construct tensor by repeating the value.
if isinstance(k, int):
# Put the tensor on same device as the mask.
k = k * torch.ones(*other_dims_size, dtype=torch.long, device=mask.device)
else:
if list(k.size()) != other_dims_size:
raise ValueError(
"`k` must have the same shape as `input_` with dimension `dim` removed."
)
num_items = input_.size(dim)
# (batch_size, num_items) -- "batch_size" refers to all other dimensions stacked together
input_ = input_.permute(*permutation).reshape(-1, num_items)
mask = mask.permute(*permutation).reshape(-1, num_items)
k = k.reshape(-1)
# Make sure that we don't select any masked items by setting their scores to be very
# negative.
input_ = replace_masked_values(input_, mask, min_value_of_dtype(input_.dtype))
# Shape: (batch_size, max_k)
_, top_indices = input_.topk(max_k, 1)
# Mask based on number of items to keep for each sentence.
# Shape: (batch_size, max_k)
top_indices_mask = get_mask_from_sequence_lengths(k, max_k).bool()
# Fill all masked indices with largest "top" index for that sentence, so that all masked
# indices will be sorted to the end.
# Shape: (batch_size, 1)
fill_value, _ = top_indices.max(dim=1, keepdim=True)
# Shape: (batch_size, max_num_items_to_keep)
top_indices = torch.where(top_indices_mask, top_indices, fill_value)
# Now we order the selected indices in increasing order with
# respect to their indices (and hence, with respect to the
# order they originally appeared in the `embeddings` tensor).
top_indices, _ = top_indices.sort(1)
# Combine the masks on spans that are out-of-bounds, and the mask on spans that are outside
# the top k for each sentence.
# Shape: (batch_size, max_k)
sequence_mask = mask.gather(1, top_indices)
top_mask = top_indices_mask & sequence_mask
# Shape: (batch_size, max_k)
top_input = input_.gather(1, top_indices)
return (
top_input.reshape(*permuted_size).permute(*reverse_permutation),
top_mask.reshape(*permuted_size).permute(*reverse_permutation),
top_indices.reshape(*permuted_size).permute(*reverse_permutation),
)
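# A minimal usage sketch for `masked_topk` (illustrative values, kept in comments); note that
# the masked-out 5.0 is never selected and the results keep the original ordering:
#
#     import torch
#     scores = torch.tensor([[1.0, 5.0, 2.0, 4.0]])
#     mask = torch.tensor([[True, False, True, True]])
#     top_input, top_mask, top_indices = masked_topk(scores, mask, k=2)
#     # top_indices -> [[2, 3]], top_input -> [[2.0, 4.0]], top_mask -> [[True, True]]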
def info_value_of_dtype(dtype: torch.dtype):
"""
Returns the `finfo` or `iinfo` object of a given PyTorch data type. Does not allow torch.bool.
"""
if dtype == torch.bool:
raise TypeError("Does not support torch.bool")
elif dtype.is_floating_point:
return torch.finfo(dtype)
else:
return torch.iinfo(dtype)
def min_value_of_dtype(dtype: torch.dtype):
"""
Returns the minimum value of a given PyTorch data type. Does not allow torch.bool.
"""
return info_value_of_dtype(dtype).min
def max_value_of_dtype(dtype: torch.dtype):
"""
Returns the maximum value of a given PyTorch data type. Does not allow torch.bool.
"""
return info_value_of_dtype(dtype).max
def tiny_value_of_dtype(dtype: torch.dtype):
"""
Returns a moderately tiny value for a given PyTorch data type that is used to avoid numerical
issues such as division by zero.
    This is different from `info_value_of_dtype(dtype).tiny` because that value is so small it can cause NaN bugs.
Only supports floating point dtypes.
"""
if not dtype.is_floating_point:
raise TypeError("Only supports floating point dtypes.")
if dtype == torch.float or dtype == torch.double:
return 1e-13
elif dtype == torch.half:
return 1e-4
else:
raise TypeError("Does not support dtype " + str(dtype))
| allennlp-master | allennlp/nn/util.py |
"""
An `Activation` is just a function
that takes some parameters and returns an element-wise activation function.
For the most part we just use
[PyTorch activations](https://pytorch.org/docs/master/nn.html#non-linear-activations).
Here we provide a thin wrapper to allow registering them and instantiating them `from_params`.
The available activation functions are
* "linear"
* ["mish"](https://arxiv.org/abs/1908.08681)
* ["swish"](https://arxiv.org/abs/1710.05941)
* ["relu"](https://pytorch.org/docs/master/nn.html#torch.nn.ReLU)
* ["relu6"](https://pytorch.org/docs/master/nn.html#torch.nn.ReLU6)
* ["elu"](https://pytorch.org/docs/master/nn.html#torch.nn.ELU)
* ["prelu"](https://pytorch.org/docs/master/nn.html#torch.nn.PReLU)
* ["leaky_relu"](https://pytorch.org/docs/master/nn.html#torch.nn.LeakyReLU)
* ["threshold"](https://pytorch.org/docs/master/nn.html#torch.nn.Threshold)
* ["hardtanh"](https://pytorch.org/docs/master/nn.html#torch.nn.Hardtanh)
* ["sigmoid"](https://pytorch.org/docs/master/nn.html#torch.nn.Sigmoid)
* ["tanh"](https://pytorch.org/docs/master/nn.html#torch.nn.Tanh)
* ["log_sigmoid"](https://pytorch.org/docs/master/nn.html#torch.nn.LogSigmoid)
* ["softplus"](https://pytorch.org/docs/master/nn.html#torch.nn.Softplus)
* ["softshrink"](https://pytorch.org/docs/master/nn.html#torch.nn.Softshrink)
* ["softsign"](https://pytorch.org/docs/master/nn.html#torch.nn.Softsign)
* ["tanhshrink"](https://pytorch.org/docs/master/nn.html#torch.nn.Tanhshrink)
* ["selu"](https://pytorch.org/docs/master/nn.html#torch.nn.SELU)
"""
from typing import Callable
import torch
from overrides import overrides
from allennlp.common import Registrable
class Activation(torch.nn.Module, Registrable):
"""
Pytorch has a number of built-in activation functions. We group those here under a common
type, just to make it easier to configure and instantiate them `from_params` using
`Registrable`.
Note that we're only including element-wise activation functions in this list. You really need
to think about masking when you do a softmax or other similar activation function, so it
requires a different API.
"""
def __call__(self, tensor: torch.Tensor) -> torch.Tensor:
"""
This function is here just to make mypy happy. We expect activation functions to follow
this API; the builtin pytorch activation functions follow this just fine, even though they
don't subclass `Activation`. We're just making it explicit here, so mypy knows that
activations are callable like this.
"""
raise NotImplementedError
class _ActivationLambda(torch.nn.Module):
"""Wrapper around non PyTorch, lambda based activations to display them as modules whenever printing model."""
def __init__(self, func: Callable[[torch.Tensor], torch.Tensor], name: str):
super().__init__()
self._name = name
self._func = func
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._func(x)
@overrides
def _get_name(self):
return self._name
# There are no classes to decorate, so we hack these into Registrable._registry.
# If you want to instantiate it, you can do like this:
# Activation.by_name('relu')()
Registrable._registry[Activation] = {
"linear": (lambda: _ActivationLambda(lambda x: x, "Linear"), None), # type: ignore
"mish": ( # type: ignore
lambda: _ActivationLambda(
lambda x: x * torch.tanh(torch.nn.functional.softplus(x)), "Mish"
),
None,
),
"swish": (lambda: _ActivationLambda(lambda x: x * torch.sigmoid(x), "Swish"), None), # type: ignore
"relu": (torch.nn.ReLU, None),
"relu6": (torch.nn.ReLU6, None),
"elu": (torch.nn.ELU, None),
"prelu": (torch.nn.PReLU, None),
"leaky_relu": (torch.nn.LeakyReLU, None),
"threshold": (torch.nn.Threshold, None),
"hardtanh": (torch.nn.Hardtanh, None),
"sigmoid": (torch.nn.Sigmoid, None),
"tanh": (torch.nn.Tanh, None),
"log_sigmoid": (torch.nn.LogSigmoid, None),
"softplus": (torch.nn.Softplus, None),
"softshrink": (torch.nn.Softshrink, None),
"softsign": (torch.nn.Softsign, None),
"tanhshrink": (torch.nn.Tanhshrink, None),
"selu": (torch.nn.SELU, None),
"gelu": (torch.nn.GELU, None),
}
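# A minimal usage sketch (illustrative, kept in comments): activations are instantiated by
# name through the registry entries above, e.g.
#
#     import torch
#     relu = Activation.by_name("relu")()
#     mish = Activation.by_name("mish")()
#     out = mish(torch.tensor([-1.0, 0.0, 1.0]))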
| allennlp-master | allennlp/nn/activations.py |
from allennlp.nn.activations import Activation
from allennlp.nn.initializers import Initializer, InitializerApplicator
from allennlp.nn.regularizers import RegularizerApplicator
| allennlp-master | allennlp/nn/__init__.py |
from inspect import signature
from typing import List, Callable, Tuple, Dict, cast, TypeVar
import warnings
from overrides import overrides
import torch
from allennlp.common import FromParams, Registrable
from allennlp.common.checks import ConfigurationError
from allennlp.nn.util import min_value_of_dtype
StateType = Dict[str, torch.Tensor]
StepFunctionTypeWithTimestep = Callable[
[torch.Tensor, StateType, int], Tuple[torch.Tensor, StateType]
]
StepFunctionTypeNoTimestep = Callable[[torch.Tensor, StateType], Tuple[torch.Tensor, StateType]]
StepFunctionType = TypeVar(
"StepFunctionType", StepFunctionTypeWithTimestep, StepFunctionTypeNoTimestep
)
"""
The type of step function that can be passed to [`BeamSearch.search`](#search).
This can either be [`StepFunctionTypeWithTimestep`](#stepfunctiontypewithtimestep)
or [`StepFunctionTypeNoTimestep`](#stepfunctiontypenotimestep).
"""
class Sampler(Registrable):
"""
An abstract class that can be used to sample candidates (either nodes or beams)
within `BeamSearch`.
A `Sampler` just has three methods, `init_state()`, `sample_nodes()` and `sample_beams()`.
`init_state()` takes three arguments:
    - a tensor of starting log probs with shape `(batch_size, num_classes)`,
- the batch size, an int,
- and the number of classes, also an int.
It returns a state dictionary with any state tensors needed for subsequent
calls to `sample_nodes()` and `sample_beams()`.
By default this method just returns an empty dictionary.
Both `sample_nodes()` and `sample_beams()` should take three arguments:
- tensor of normalized log probabilities with shape `(batch_size, num_examples)`,
- an integer representing the number of samples to take for each example in the batch,
- and a state dictionary which could contain any tensors needed for the `Sampler` to keep
track of state.
For `sample_nodes()`, `num_examples = num_classes`, but for `sample_beams`,
`num_examples = beam_size * per_node_beam_size`.
The return value should be a tuple containing:
- a tensor of log probabilities of the sampled examples with shape `(batch_size, num_samples)`,
- a tensor of indices of the sampled examples with shape `(batch_size, num_samples)`,
- and the updated state dictionary.
A default implementation of `sample_beams` is provided, which just deterministically
picks the `k` examples with highest log probability.
"""
default_implementation = "deterministic"
def init_state(
self, start_class_log_probabilities: torch.Tensor, batch_size: int, num_classes: int
) -> StateType:
return {}
def sample_nodes(
self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
raise NotImplementedError
def sample_beams(
self, log_probs: torch.Tensor, beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
selected_log_probs, selected_indices = torch.topk(log_probs, beam_size, dim=-1)
return selected_log_probs, selected_indices, {}
@Sampler.register("deterministic")
class DeterministicSampler(Sampler):
"""
A `Sampler` that just deterministically returns the `k` nodes or beams with highest
log probability.
"""
@overrides
def sample_nodes(
self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
selected_log_probs, selected_indices = torch.topk(log_probs, per_node_beam_size, dim=-1)
return selected_log_probs, selected_indices, {}
@Sampler.register("multinomial")
class MultinomialSampler(Sampler):
"""
A `Sampler` which samples nodes from the given multinomial distribution. Beams are sampled
    in the default, deterministic way.
# Parameters
temperature : `float`, optional (default = `1.0`)
A `temperature` below 1.0 produces a sharper probability distribution and a `temperature` above 1.0
produces a flatter probability distribution.
with_replacement : `bool`, optional (default = `False`)
Whether to sample with replacement.
"""
def __init__(
self,
temperature: float = 1.0,
with_replacement: bool = False,
) -> None:
self.temperature = temperature
self.with_replacement = with_replacement
@overrides
def sample_nodes(
self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
if self.temperature != 1.0:
_probabilities = torch.nn.functional.softmax(log_probs / self.temperature, dim=-1)
else:
_probabilities = log_probs.exp()
selected_indices = torch.multinomial(
_probabilities, per_node_beam_size, replacement=self.with_replacement
)
return torch.gather(log_probs, 1, selected_indices), selected_indices, state
@Sampler.register("top-k")
class TopKSampler(Sampler):
"""
A `Sampler` which redistributes the probability mass function for nodes among the
top `k` choices, then samples from that subset after re-normalizing the probabilities.
Beams are sampled in the default, deterministic way.
# Parameters
k : `int`, optional (default = `1`)
The number of top choices to be selected from.
temperature : `float`, optional (default = `1.0`)
A `temperature` below 1.0 produces a sharper probability distribution and a `temperature`
above 1.0 produces a flatter probability distribution.
with_replacement: `bool`, optional, (default = `False`)
If set to `True`, samples will be selected with replacement from the top k choices.
"""
def __init__(
self,
k: int = 1,
temperature: float = 1.0,
with_replacement: bool = False,
):
self.k = k
self.temperature = temperature or 1.0
self.with_replacement = with_replacement
@overrides
def sample_nodes(
self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
if not per_node_beam_size <= self.k <= log_probs.size()[1]:
raise ValueError(
"k must be a postive integer no less than per_node_beam_size and no greater than vocabulary size"
)
# shape (both): (batch_size, k)
top_k_log_probs, top_k_indices = log_probs.topk(self.k, dim=-1)
# Apply temperature if necessary.
# shape: (batch_size, k)
if self.temperature != 1.0:
top_k_log_probs = top_k_log_probs / self.temperature
# Re-normalize the subset.
# shape: (batch_size, k)
normalized_top_k_probs = torch.nn.functional.softmax(top_k_log_probs, dim=-1)
# Sample from the re-normalized subset.
# NOTE: These indices are not indices into `log_probs`, they are indices into `top_k_log_probs`.
# shape: (batch_size, per_node_beam_size)
sampled_indices = torch.multinomial(
normalized_top_k_probs, per_node_beam_size, replacement=self.with_replacement
)
# Convert `sampled_indices` back to indices in the original `log_probs` tensor.
# shape: (batch_size, per_node_beam_size)
indices = top_k_indices.gather(-1, sampled_indices)
return log_probs.gather(1, indices), indices, state
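# A minimal usage sketch for `TopKSampler.sample_nodes` (illustrative values, kept in comments):
#
#     import torch
#     sampler = TopKSampler(k=3)
#     log_probs = torch.nn.functional.log_softmax(torch.randn(2, 10), dim=-1)
#     probs, indices, state = sampler.sample_nodes(log_probs, 2, {})
#     # probs, indices: shape (2, 2); indices are drawn only from each row's top 3 classes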
@Sampler.register("top-p")
class TopPSampler(Sampler):
"""
A `Sampler` which redistributes the probability mass function for nodes among
the top choices with a cumulative probability of at least `p`, then samples from that subset
after re-normalizing the probabilities.
Beams are sampled in the default, deterministic way.
# Parameters
p : `float`, optional (default = `0.9`)
The cumulative probability cutoff threshold. A higher value of `p` will result in more possible
examples to sample from. If `with_replacement` is `False` and the number of possible samples is
        insufficient to sample from without replacement when calling `sample_nodes`, then the top
`per_node_beam_size` examples will be chosen.
temperature : `float`, optional (default = `1.0`)
A `temperature` below 1.0 produces a sharper probability distribution and a `temperature`
above 1.0 produces a flatter probability distribution.
with_replacement : `bool`, optional, (default = `False`)
If set to `True`, samples will be selected with replacement from the top choices.
"""
def __init__(
self,
p: float = 0.9,
temperature: float = 1.0,
with_replacement: bool = False,
):
if p < 0.0 or p > 1.0:
raise ValueError("p must be a positive float no greater than 1.0")
self.p = p
self.temperature = temperature or 1.0
self.with_replacement = with_replacement
@overrides
def sample_nodes(
self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
if not per_node_beam_size <= log_probs.size()[1]:
raise ValueError("per_node_beam_size cannot be greater than vocabulary size")
# First apply temperature coefficient:
if self.temperature != 1.0:
_log_probs = torch.nn.functional.log_softmax(log_probs / self.temperature, dim=-1)
else:
_log_probs = log_probs
# Sort the probabilities in descending order to then find cumulative sum
log_probs_descending, sorting_indices = torch.sort(_log_probs, descending=True)
# shape: (batch_size, num_classes)
probabilities_descending = log_probs_descending.exp()
probabilities_summed = torch.cumsum(probabilities_descending, dim=-1)
# Create a mask for filtering out probabilities that don't make the top `p`.
# shape: (batch_size, num_classes)
exclusion_mask = probabilities_summed >= self.p
# We want to include the first index where probabilities_summed >= p, so we shift over one.
exclusion_mask[..., 1:] = exclusion_mask[..., :-1].clone()
exclusion_mask[..., 0] = False
# Make sure there's at least `per_node_beam_size` options to be selected.
if not self.with_replacement:
exclusion_mask[..., :per_node_beam_size] = False
log_probs_descending[exclusion_mask] = min_value_of_dtype(log_probs.dtype)
        # Now re-normalize the included log probs.
# shape: (batch_size, num_classes)
filtered_probabilities = torch.nn.functional.softmax(log_probs_descending, dim=-1)
# Sample from the re-normalized subset.
# NOTE: These indices are not indices into `log_probs`, they are indices into `log_probs_descending`.
# shape: (batch_size, per_node_beam_size)
sampled_indices = torch.multinomial(
filtered_probabilities, per_node_beam_size, replacement=self.with_replacement
)
# Convert `sampled_indices` back to indices in the original `log_probs` tensor.
# shape: (batch_size, per_node_beam_size)
selected_indices = sorting_indices.gather(-1, sampled_indices)
# Return (selected log probabilities, selected classes)
# shape: (len(log_probs),1) , (len(log_probs), 1)
return torch.gather(log_probs, 1, selected_indices), selected_indices, state
@Sampler.register("gumbel")
class GumbelSampler(Sampler):
"""
A `Sampler` which uses the Gumbel-Top-K trick to sample without replacement. See
[*Stochastic Beams and Where to Find Them: The Gumbel-Top-k Trick for Sampling
    Sequences Without Replacement*, W Kool, H Van Hoof and M Welling, 2019]
(https://api.semanticscholar.org/CorpusID:76662039).
# Parameters
temperature : `float`, optional (default = `1.0`)
A `temperature` below 1.0 produces a sharper probability distribution and a `temperature`
above 1.0 produces a flatter probability distribution.
"""
def __init__(self, temperature: float = 1.0):
self.temperature = temperature
@overrides
def init_state(
self, start_class_log_probabilities: torch.Tensor, batch_size: int, num_classes: int
) -> StateType:
# shape: (batch_size, num_classes)
zeros = start_class_log_probabilities.new_zeros((batch_size, num_classes))
# shape: (batch_size, num_classes)
G_phi_S = self.gumbel_with_max(start_class_log_probabilities, zeros)
return {"G_phi_S": G_phi_S}
@overrides
def sample_nodes(
self,
log_probs: torch.Tensor,
per_node_beam_size: int,
state: StateType,
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
# First apply temperature coefficient:
# shape: (batch_size * beam_size, num_classes)
if self.temperature != 1.0:
_log_probs = torch.nn.functional.log_softmax(log_probs / self.temperature, dim=-1)
else:
_log_probs = log_probs
# shape: (group_size,)
phi_S = state["phi_S"]
# shape: (group_size, num_classes)
phi_S = phi_S.unsqueeze(-1).expand_as(_log_probs)
# shape: (group_size, num_classes)
phi_S_new = phi_S + _log_probs
# shape: (group_size, 1)
G_phi_S = state["G_phi_S"].unsqueeze(-1)
# shape: (group_size, num_classes)
G_phi_S_new = self.gumbel_with_max(phi_S_new, G_phi_S)
# Replace NaNs with very negative number.
# shape: (group_size, num_classes)
# G_phi_S_new[G_phi_S_new.isnan()] = min_value_of_dtype(G_phi_S_new.dtype)
# shape (both): (group_size, per_node_beam_size)
top_G_phi_S_new, top_indices = torch.topk(G_phi_S_new, per_node_beam_size, dim=-1)
# shape: (group_size, per_node_beam_size)
top_log_probs = log_probs.gather(1, top_indices)
return top_log_probs, top_indices, {"G_phi_S": top_G_phi_S_new}
@overrides
def sample_beams(
self,
log_probs: torch.Tensor,
beam_size: int,
state: StateType,
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
"""
Returns the beams with the highest perturbed log probabilities.
"""
# shape (log_probs): (batch_size, beam_size * per_node_beam_size)
batch_size = log_probs.size()[0]
# shape: (batch_size * beam_size, per_node_beam_size)
G_phi_S = state["G_phi_S"]
# shape: (batch_size, beam_size * per_node_beam_size)
G_phi_S = G_phi_S.reshape_as(log_probs)
# shape (both): (batch_size, beam_size)
G_phi_S_new, selected_indices = torch.topk(G_phi_S, beam_size, dim=-1)
# shape: (batch_size, beam_size)
selected_log_probs = log_probs.gather(1, selected_indices)
# Now sort the selected beams by their true log prob.
# shape (all): (batch_size, beam_size)
selected_log_probs, sort_indices = selected_log_probs.sort(dim=-1, descending=True)
selected_indices = selected_indices.gather(1, sort_indices)
G_phi_S_new = G_phi_S_new.gather(1, sort_indices)
# shape: (batch_size * beam_size,)
G_phi_S_new = G_phi_S_new.reshape(batch_size * beam_size)
# shape: (batch_size * beam_size,)
phi_S = selected_log_probs.reshape(batch_size * beam_size)
return selected_log_probs, selected_indices, {"G_phi_S": G_phi_S_new, "phi_S": phi_S}
def gumbel(self, phi) -> torch.Tensor:
"""
Sample `Gumbel(phi)`.
`phi` should have shape `(batch_size, num_classes)`.
"""
return -torch.log(-torch.log(torch.rand_like(phi))) + phi
def gumbel_with_max(self, phi, T) -> torch.Tensor:
"""
Sample `Gumbel(phi)` conditioned on the maximum value being equal to `T`.
`phi` should have shape `(batch_size, num_classes)` and `T` should have
shape `(batch_size, 1)`.
"""
# Shape: (batch_size, num_classes)
G_phi = self.gumbel(phi)
# Now we find the maximum from these samples.
# Shape: (batch_size, )
Z, _ = G_phi.max(dim=-1)
# Shape: (batch_size, num_classes)
v = T - G_phi + torch.log1p(-torch.exp(G_phi - Z.unsqueeze(-1)))
# Shape: (batch_size, num_classes)
return T - torch.nn.functional.relu(v) - torch.log1p(torch.exp(-v.abs()))
class BeamSearch(FromParams):
"""
Implements the beam search algorithm for decoding the most likely sequences.
# Parameters
end_index : `int`
The index of the "stop" or "end" token in the target vocabulary.
max_steps : `int`, optional (default = `50`)
The maximum number of decoding steps to take, i.e. the maximum length
of the predicted sequences.
beam_size : `int`, optional (default = `10`)
The width of the beam used.
per_node_beam_size : `int`, optional (default = `beam_size`)
The maximum number of candidates to consider per node, at each step in the search.
If not given, this just defaults to `beam_size`. Setting this parameter
to a number smaller than `beam_size` may give better results, as it can introduce
more diversity into the search. See
[*Beam Search Strategies for Neural Machine Translation*, Freitag and Al-Onaizan, 2017]
(https://api.semanticscholar.org/CorpusID:2229477).
sampler : `Sampler`, optional (default = `None`)
An optional `Sampler` which is used to pick next candidate nodes and beams.
If not specified, `DeterministicSampler` will be used, which just takes the
`per_node_beam_size` most likely nodes and the `beam_size` most likely beams.
Using the [`GumbelSampler`](#gumbelsampler), on the other hand, will give you
[Stochastic Beam Search](https://api.semanticscholar.org/CorpusID:76662039).
"""
def __init__(
self,
end_index: int,
max_steps: int = 50,
beam_size: int = 10,
per_node_beam_size: int = None,
sampler: Sampler = None,
) -> None:
if not max_steps > 0:
raise ValueError("max_steps must be positive")
if not beam_size > 0:
raise ValueError("beam_size must be positive")
if per_node_beam_size is not None and not per_node_beam_size > 0:
raise ValueError("per_node_beam_size must be positive")
self._end_index = end_index
self.max_steps = max_steps
self.beam_size = beam_size
self.per_node_beam_size = per_node_beam_size or beam_size
self.sampler = sampler or DeterministicSampler()
@staticmethod
def _reconstruct_sequences(predictions, backpointers):
# Reconstruct the sequences.
# shape: [(batch_size, beam_size, 1)]
reconstructed_predictions = [predictions[-1].unsqueeze(2)]
if not backpointers:
return reconstructed_predictions
# shape: (batch_size, beam_size)
cur_backpointers = backpointers[-1]
for timestep in range(len(predictions) - 2, 0, -1):
# shape: (batch_size, beam_size, 1)
cur_preds = predictions[timestep].gather(1, cur_backpointers).unsqueeze(2)
reconstructed_predictions.append(cur_preds)
# shape: (batch_size, beam_size)
cur_backpointers = backpointers[timestep - 1].gather(1, cur_backpointers)
# shape: (batch_size, beam_size, 1)
final_preds = predictions[0].gather(1, cur_backpointers).unsqueeze(2)
reconstructed_predictions.append(final_preds)
return reconstructed_predictions
@torch.no_grad()
def search(
self,
start_predictions: torch.Tensor,
start_state: StateType,
step: StepFunctionType,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Given a starting state and a step function, apply beam search to find the
most likely target sequences.
# Notes
If your step function returns `-inf` for some log probabilities
(like if you're using a masked log-softmax) then some of the "best"
sequences returned may also have `-inf` log probability. Specifically
this happens when the beam size is smaller than the number of actions
with finite log probability (non-zero probability) returned by the step function.
Therefore if you're using a mask you may want to check the results from `search`
and potentially discard sequences with non-finite log probability.
# Parameters
start_predictions : `torch.Tensor`
A tensor containing the initial predictions with shape `(batch_size,)`.
Usually the initial predictions are just the index of the "start" token
in the target vocabulary.
start_state : `StateType`
The initial state passed to the `step` function. Each value of the state dict
should be a tensor of shape `(batch_size, *)`, where `*` means any other
number of dimensions.
step : `StepFunctionType`
A function that is responsible for computing the next most likely tokens,
given the current state and the predictions from the last time step.
The function should accept two or three arguments:
- a tensor of shape `(group_size,)` representing the index of the predicted
tokens from the last time step,
- the current state, a `StateType`, and
- optionally, the timestep, an `int`.
The `group_size` will be `batch_size * beam_size`, except in the initial
step, for which it will just be `batch_size`.
The function is expected to return a tuple, where the first element
is a tensor of shape `(group_size, target_vocab_size)` containing
the log probabilities of the tokens for the next step, and the second
element is the updated state. The tensor in the state should have shape
`(group_size, *)`, where `*` means any other number of dimensions.
# Returns
`Tuple[torch.Tensor, torch.Tensor]`
Tuple of `(predictions, log_probabilities)`, where `predictions`
has shape `(batch_size, beam_size, max_steps)` and `log_probabilities`
has shape `(batch_size, beam_size)`.
"""
step_signature = signature(step)
if len(step_signature.parameters) < 3:
# If the step function we're given does not take the time step argument, wrap it
# in one that does.
old_step = cast(StepFunctionTypeNoTimestep, step)
def new_step(
last_predictions: torch.Tensor, state: Dict[str, torch.Tensor], time_step: int
):
return old_step(last_predictions, state)
return self._search(start_predictions, start_state, new_step)
else:
return self._search(
start_predictions, start_state, cast(StepFunctionTypeWithTimestep, step)
)
def _search(
self,
start_predictions: torch.Tensor,
start_state: StateType,
step: StepFunctionTypeWithTimestep,
) -> Tuple[torch.Tensor, torch.Tensor]:
batch_size = start_predictions.size()[0]
# List of (batch_size, beam_size) tensors. One for each time step. Does not
# include the start symbols, which are implicit.
predictions: List[torch.Tensor] = []
# List of (batch_size, beam_size) tensors. One for each time step. None for
# the first. Stores the index n for the parent prediction, i.e.
# predictions[t-1][i][n], that it came from.
backpointers: List[torch.Tensor] = []
# Calculate the first timestep. This is done outside the main loop
# because we are going from a single decoder input (the output from the
# encoder) to the top `beam_size` decoder outputs. On the other hand,
# within the main loop we are going from the `beam_size` elements of the
# beam to `beam_size`^2 candidates from which we will select the top
# `beam_size` elements for the next iteration.
# shape: (batch_size, num_classes)
start_class_log_probabilities, state = step(start_predictions, start_state, 0)
num_classes = start_class_log_probabilities.size()[1]
# Make sure `per_node_beam_size` is not larger than `num_classes`.
if self.per_node_beam_size > num_classes:
raise ConfigurationError(
f"Target vocab size ({num_classes:d}) too small "
f"relative to per_node_beam_size ({self.per_node_beam_size:d}).\n"
f"Please decrease beam_size or per_node_beam_size."
)
sampler_state = self.sampler.init_state(
start_class_log_probabilities, batch_size, num_classes
)
# Get the initial predicted classes and their log probabilities.
# shape: (batch_size, beam_size), (batch_size, beam_size)
(
start_top_log_probabilities,
start_predicted_classes,
sampler_state,
) = self.sampler.sample_beams(start_class_log_probabilities, self.beam_size, sampler_state)
if self.beam_size == 1 and (start_predicted_classes == self._end_index).all():
warnings.warn(
"Empty sequences predicted. You may want to increase the beam size or ensure "
"your step function is working properly.",
RuntimeWarning,
)
return start_predicted_classes.unsqueeze(-1), start_top_log_probabilities
# The log probabilities for the last time step.
# shape: (batch_size, beam_size)
last_log_probabilities = start_top_log_probabilities
# shape: [(batch_size, beam_size)]
predictions.append(start_predicted_classes)
# Log probability tensor that mandates that the end token is selected.
# shape: (batch_size * beam_size, num_classes)
log_probs_after_end = start_class_log_probabilities.new_full(
(batch_size * self.beam_size, num_classes), float("-inf")
)
log_probs_after_end[:, self._end_index] = 0.0
# Set the same state for each element in the beam.
self._update_initial_state(state, batch_size)
for timestep in range(self.max_steps - 1):
# shape: (batch_size * beam_size,)
last_predictions = predictions[-1].reshape(batch_size * self.beam_size)
# If every predicted token from the last step is `self._end_index`,
# then we can stop early.
if (last_predictions == self._end_index).all():
break
# Take a step. This gets the predicted log probs of the next classes
# and updates the state.
# shape: (batch_size * beam_size, num_classes)
class_log_probabilities, state = step(last_predictions, state, timestep + 1)
# shape: (batch_size * beam_size, num_classes)
last_predictions_expanded = last_predictions.unsqueeze(-1).expand(
batch_size * self.beam_size, num_classes
)
# Here we are finding any beams where we predicted the end token in
# the previous timestep and replacing the distribution with a
# one-hot distribution, forcing the beam to predict the end token
# this timestep as well.
# shape: (batch_size * beam_size, num_classes)
cleaned_log_probabilities = torch.where(
last_predictions_expanded == self._end_index,
log_probs_after_end,
class_log_probabilities,
)
# shape (both): (batch_size * beam_size, per_node_beam_size)
top_log_probabilities, predicted_classes, sampler_state = self.sampler.sample_nodes(
cleaned_log_probabilities, self.per_node_beam_size, sampler_state
)
# Here we expand the last log probabilities to (batch_size * beam_size, per_node_beam_size)
# so that we can add them to the current log probs for this timestep.
# This lets us maintain the log probability of each element on the beam.
# shape: (batch_size * beam_size, per_node_beam_size)
expanded_last_log_probabilities = (
last_log_probabilities.unsqueeze(2)
.expand(batch_size, self.beam_size, self.per_node_beam_size)
.reshape(batch_size * self.beam_size, self.per_node_beam_size)
)
# shape: (batch_size * beam_size, per_node_beam_size)
summed_top_log_probabilities = top_log_probabilities + expanded_last_log_probabilities
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_summed = summed_top_log_probabilities.reshape(
batch_size, self.beam_size * self.per_node_beam_size
)
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_predicted_classes = predicted_classes.reshape(
batch_size, self.beam_size * self.per_node_beam_size
)
# Keep only the top `beam_size` beam indices.
# shape (both): (batch_size, beam_size)
(
restricted_beam_log_probs,
restricted_beam_indices,
sampler_state,
) = self.sampler.sample_beams(reshaped_summed, self.beam_size, sampler_state)
# Use the beam indices to extract the corresponding classes.
# shape: (batch_size, beam_size)
restricted_predicted_classes = reshaped_predicted_classes.gather(
1, restricted_beam_indices
)
predictions.append(restricted_predicted_classes)
# shape: (batch_size, beam_size)
last_log_probabilities = restricted_beam_log_probs
# The beam indices come from a `beam_size * per_node_beam_size` dimension where the
# indices with a common ancestor are grouped together. Hence
# dividing by per_node_beam_size gives the ancestor. (Note that this is integer
# division as the tensor is a LongTensor.)
# shape: (batch_size, beam_size)
backpointer = restricted_beam_indices // self.per_node_beam_size
backpointers.append(backpointer)
# Keep only the pieces of the state tensors corresponding to the
# ancestors created this iteration.
self._update_state(state, backpointer)
if not torch.isfinite(last_log_probabilities).all():
warnings.warn(
"Infinite log probabilities encountered. Some final sequences may not make sense. "
"This can happen when the beam size is larger than the number of valid (non-zero "
"probability) transitions that the step function produces.",
RuntimeWarning,
)
reconstructed_predictions = self._reconstruct_sequences(predictions, backpointers)
# shape: (batch_size, beam_size, max_steps)
all_predictions = torch.cat(list(reversed(reconstructed_predictions)), 2)
return all_predictions, last_log_probabilities
@staticmethod
def _is_multilayer_rnn_decoder(key: str, state_tensor: torch.Tensor) -> bool:
return state_tensor.dim() == 3 and key in {
"decoder_hidden",
"decoder_context",
}
def _update_initial_state(self, state: StateType, batch_size: int):
"""
Expand tensors in a state dictionary from `(batch_size, *)` to `(batch_size * beam_size, *)`.
"""
for key, state_tensor in state.items():
if state_tensor is None:
continue
multilayer_rnn_decoder = self._is_multilayer_rnn_decoder(key, state_tensor)
if multilayer_rnn_decoder:
# shape: (num_layers, batch_size * beam_size, *)
num_layers, _, *last_dims = state_tensor.size()
state[key] = (
state_tensor.unsqueeze(2)
.expand(num_layers, batch_size, self.beam_size, *last_dims)
.reshape(num_layers, batch_size * self.beam_size, *last_dims)
)
else:
# shape: (batch_size * beam_size, *)
_, *last_dims = state_tensor.size()
state[key] = (
state_tensor.unsqueeze(1)
.expand(batch_size, self.beam_size, *last_dims)
.reshape(batch_size * self.beam_size, *last_dims)
)
def _update_state(self, state: StateType, backpointer: torch.Tensor):
batch_size = backpointer.size()[0]
for key, state_tensor in state.items():
if state_tensor is None:
continue
multilayer_rnn_decoder = self._is_multilayer_rnn_decoder(key, state_tensor)
if multilayer_rnn_decoder:
# shape: (num_layers, batch_size * beam_size, *)
num_layers, _, *last_dims = state_tensor.size()
expanded_backpointer = backpointer.view(
batch_size, self.beam_size, *([1] * len(last_dims))
).expand(batch_size, self.beam_size, *last_dims)
expanded_backpointer = expanded_backpointer.unsqueeze(0).repeat(num_layers, 1, 1, 1)
# shape: (num_layers, batch_size * beam_size, *)
state[key] = (
state_tensor.reshape(num_layers, batch_size, self.beam_size, *last_dims)
.gather(2, expanded_backpointer)
.reshape(num_layers, batch_size * self.beam_size, *last_dims)
)
else:
_, *last_dims = state_tensor.size()
# shape: (batch_size, beam_size, *)
expanded_backpointer = backpointer.view(
batch_size, self.beam_size, *([1] * len(last_dims))
).expand(batch_size, self.beam_size, *last_dims)
# shape: (batch_size * beam_size, *)
state[key] = (
state_tensor.reshape(batch_size, self.beam_size, *last_dims)
.gather(1, expanded_backpointer)
.reshape(batch_size * self.beam_size, *last_dims)
)
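# The following is a minimal, illustrative usage sketch and is not part of the original
# module. The toy vocabulary (four token ids, with 3 as the end token) and the fixed
# transition table below are made-up assumptions; they only demonstrate the step-function
# contract described in `search` (last predictions + state in, log probs + state out).
if __name__ == "__main__":
    toy_log_probs = torch.log_softmax(
        torch.tensor(
            [
                [0.1, 2.0, 1.0, 0.5],  # next-token scores after token 0
                [0.1, 0.5, 2.0, 1.0],  # next-token scores after token 1
                [0.1, 0.5, 1.0, 2.0],  # next-token scores after token 2
                [0.1, 0.5, 1.0, 5.0],  # after the end token, strongly prefer the end token
            ]
        ),
        dim=-1,
    )

    def toy_step(last_predictions: torch.Tensor, state: StateType, timestep: int):
        # Look up one row of the toy table per element of the group; the state is unused.
        return toy_log_probs[last_predictions], state

    searcher = BeamSearch(end_index=3, max_steps=5, beam_size=2)
    start = torch.zeros(1, dtype=torch.long)  # a batch of one sequence, starting at token 0
    top_k_predictions, log_probabilities = searcher.search(start, {}, toy_step)
    print(top_k_predictions.shape, log_probabilities.shape)  # (1, 2, <=5) and (1, 2)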
| allennlp-master | allennlp/nn/beam_search.py |
"""
An initializer is just a PyTorch function.
Here we implement a proxy class that allows us
to register them and supply any additional function arguments
(for example, the `mean` and `std` of a normal initializer)
as named arguments to the constructor.
The available initialization functions are
* ["normal"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.normal_)
* ["uniform"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.uniform_)
* ["constant"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.constant_)
* ["eye"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.eye_)
* ["dirac"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.dirac_)
* ["xavier_uniform"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_uniform_)
* ["xavier_normal"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_normal_)
* ["kaiming_uniform"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_uniform_)
* ["kaiming_normal"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_normal_)
* ["orthogonal"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.orthogonal_)
* ["sparse"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.sparse_)
* ["block_orthogonal"](./initializers.md#block_orthogonal)
* ["uniform_unit_scaling"](./initializers.md#uniform_unit_scaling)
* ["pretrained"](./initializers.md#PretrainedModelInitializer)
"""
import logging
import re
import math
from typing import Callable, List, Tuple, Dict
import itertools
from overrides import overrides
import tarfile
import torch
import torch.nn.init
from allennlp.common import FromParams, Registrable
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__)
class Initializer(Registrable):
"""
An initializer is really just a bare pytorch function. This class
is a proxy that allows us to implement `Registrable` for those functions.
"""
default_implementation = "normal"
def __call__(self, tensor: torch.Tensor, **kwargs) -> None:
"""
This function is here just to make mypy happy. We expect initialization functions to
follow this API; the builtin pytorch initialization functions follow this just fine, even
though they don't subclass `Initializer`. We're just making it explicit here, so mypy
knows that initializers are callable like this.
"""
raise NotImplementedError
def uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = "linear"):
"""
An initialiser which preserves output variance for approximately Gaussian
distributed inputs. This boils down to initialising layers using a uniform
distribution in the range `(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)`, where
`dim[0]` is equal to the input dimension of the parameter and the `scale`
is a constant scaling factor which depends on the non-linearity used.
See `Random Walk Initialisation for Training Very Deep Feedforward Networks
<https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_
for more information.
# Parameters
tensor : `torch.Tensor`, required.
The tensor to initialise.
nonlinearity : `str`, optional (default = `"linear"`)
The non-linearity which is performed after the projection that this
tensor is involved in. This must be the name of a function contained
in the `torch.nn.functional` package.
# Returns
The initialised tensor.
"""
size = 1.0
# Estimate the input size. This won't work perfectly,
# but it covers almost all use cases where this initialiser
# would be expected to be useful, i.e in large linear and
# convolutional layers, as the last dimension will almost
# always be the output size.
for dimension in list(tensor.size())[:-1]:
size *= dimension
activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor)
max_value = math.sqrt(3 / size) * activation_scaling
return tensor.data.uniform_(-max_value, max_value)
def block_orthogonal(tensor: torch.Tensor, split_sizes: List[int], gain: float = 1.0) -> None:
"""
An initializer which allows initializing model parameters in "blocks". This is helpful
in the case of recurrent models which use multiple gates applied to linear projections,
which can be computed efficiently if they are concatenated together. However, they are
separate parameters which should be initialized independently.
# Parameters
tensor : `torch.Tensor`, required.
A tensor to initialize.
split_sizes : `List[int]`, required.
A list of length `tensor.ndim()` specifying the size of the
blocks along that particular dimension. E.g. `[10, 20]` would
result in the tensor being split into chunks of size 10 along the
first dimension and 20 along the second.
gain : `float`, optional (default = `1.0`)
The gain (scaling) applied to the orthogonal initialization.
"""
data = tensor.data
sizes = list(tensor.size())
if any(a % b != 0 for a, b in zip(sizes, split_sizes)):
raise ConfigurationError(
"tensor dimensions must be divisible by their respective "
"split_sizes. Found size: {} and split_sizes: {}".format(sizes, split_sizes)
)
indexes = [list(range(0, max_size, split)) for max_size, split in zip(sizes, split_sizes)]
# Iterate over all possible blocks within the tensor.
for block_start_indices in itertools.product(*indexes):
# A list of tuples containing the index to start at for this block
# and the appropriate step size (i.e split_size[i] for dimension i).
index_and_step_tuples = zip(block_start_indices, split_sizes)
# This is a tuple of slices corresponding to:
# tensor[index: index + step_size, ...]. This is
# required because we could have an arbitrary number
# of dimensions. The actual slices we need are the
# start_index: start_index + step for each dimension in the tensor.
block_slice = tuple(
slice(start_index, start_index + step) for start_index, step in index_and_step_tuples
)
data[block_slice] = torch.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain)
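# A brief illustrative sketch (the tensor sizes below are hypothetical, not from the
# original module): a (40, 20) weight treated as four independent orthogonal (10, 20)
# blocks, the way a concatenated gate projection in an RNN might be initialized.
if __name__ == "__main__":
    demo_weight = torch.empty(40, 20)
    block_orthogonal(demo_weight, split_sizes=[10, 20])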
def zero(tensor: torch.Tensor) -> None:
return tensor.data.zero_()
def lstm_hidden_bias(tensor: torch.Tensor) -> None:
"""
Initialize the biases of the forget gate to 1, and all other gates to 0,
following Jozefowicz et al., An Empirical Exploration of Recurrent Network Architectures
"""
# gates are (b_hi|b_hf|b_hg|b_ho) of shape (4*hidden_size)
tensor.data.zero_()
hidden_size = tensor.shape[0] // 4
tensor.data[hidden_size : (2 * hidden_size)] = 1.0
class _InitializerWrapper(Initializer):
def __init__(self, init_function: Callable[..., None], **kwargs):
self._init_function = init_function
self._kwargs = kwargs
def __call__(self, tensor: torch.Tensor, **kwargs) -> None:
self._init_function(tensor, **self._kwargs)
def __repr__(self):
return "Init: %s, with params: %s" % (self._init_function, self._kwargs)
@Initializer.register("normal")
class NormalInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "normal".
"""
def __init__(self, mean: float = 0.0, std: float = 0.1):
super().__init__(init_function=torch.nn.init.normal_, mean=mean, std=std)
@Initializer.register("orthogonal")
class OrthogonalInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "orthogonal".
"""
def __init__(self, gain: float = 1.0):
super().__init__(init_function=torch.nn.init.orthogonal_, gain=gain)
@Initializer.register("uniform")
class UniformInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "uniform".
"""
def __init__(self, a: float = 0.0, b: float = 1.0):
super().__init__(init_function=torch.nn.init.uniform_, a=a, b=b)
@Initializer.register("constant")
class ConstantInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "constant".
"""
def __init__(self, val: float):
super().__init__(init_function=torch.nn.init.constant_, val=val)
@Initializer.register("dirac")
class DiracInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "dirac".
"""
def __init__(self):
super().__init__(init_function=torch.nn.init.dirac_)
@Initializer.register("xavier_uniform")
class XavierUniformInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "xavir_uniform".
"""
def __init__(self, gain: float = 1.0):
super().__init__(init_function=torch.nn.init.xavier_uniform_, gain=gain)
@Initializer.register("xavier_normal")
class XavierNormalInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "xavier_normal".
"""
def __init__(self, gain: float = 1.0):
super().__init__(init_function=torch.nn.init.xavier_normal_, gain=gain)
@Initializer.register("kaiming_uniform")
class KaimingUniformInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "kaiming_uniform".
"""
def __init__(self, a: float = 0.0, mode: str = "fan_in", nonlinearity: str = "leaky_relu"):
super().__init__(
init_function=torch.nn.init.kaiming_uniform_, a=a, mode=mode, nonlinearity=nonlinearity
)
@Initializer.register("kaiming_normal")
class KaimingNormalInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "kaiming_normal".
"""
def __init__(self, a: float = 0.0, mode: str = "fan_in", nonlinearity: str = "leaky_relu"):
super().__init__(
init_function=torch.nn.init.kaiming_normal_, a=a, mode=mode, nonlinearity=nonlinearity
)
@Initializer.register("sparse")
class SparseInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "sparse".
"""
def __init__(self, sparsity: float, std: float = 0.01):
super().__init__(init_function=torch.nn.init.sparse_, sparsity=sparsity, std=std)
@Initializer.register("eye")
class EyeInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "eye".
"""
def __init__(self):
super().__init__(init_function=torch.nn.init.eye_)
@Initializer.register("block_orthogonal")
class BlockOrthogonalInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "block_orthogonal".
"""
def __init__(self, split_sizes: List[int], gain: float = 1.0):
super().__init__(init_function=block_orthogonal, split_sizes=split_sizes, gain=gain)
@Initializer.register("uniform_unit_scaling")
class UniformUnitScalingInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "uniform_unit_scaling".
"""
def __init__(self, nonlinearity: str = "linear"):
super().__init__(init_function=uniform_unit_scaling, nonlinearity=nonlinearity)
@Initializer.register("zero")
class ZeroInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "zero".
"""
def __init__(self):
super().__init__(init_function=zero)
@Initializer.register("lstm_hidden_bias")
class LstmHiddenBiasInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "lstm_hidden_bias".
"""
def __init__(self):
super().__init__(init_function=lstm_hidden_bias)
@Initializer.register("pretrained")
class PretrainedModelInitializer(Initializer):
"""
An initializer which allows initializing parameters using a pretrained model. The
initializer will load all of the weights from the `weights_file_path` and use the
name of the new parameters to index into the pretrained parameters. Therefore,
by default, the names of the new and pretrained parameters must be the same.
However, this behavior can be overridden using the `parameter_name_overrides`,
which remaps the name of the new parameter to the key which should be used
to index into the pretrained parameters.
The initializer will load all of the weights from the `weights_file_path`
regardless of which parameters will actually be used to initialize the new model.
So, if you need to initialize several parameters using a pretrained model, the most
memory-efficient way to do this is to use one `PretrainedModelInitializer` per
weights file and use a regex to match all of the new parameters which need to be
initialized.
If you are using a configuration file to instantiate this object, the below entry
in the `InitializerApplicator` parameters will initialize `linear_1.weight` and
`linear_2.weight` using a pretrained model. `linear_1.weight` will be initialized
to the pretrained parameters called `linear_1.weight`, but `linear_2.weight` will
be initialized to the pretrained parameters called `linear_3.weight`:
```
["linear_1.weight|linear_2.weight",
{
"type": "pretrained",
"weights_file_path": "best.th",
"parameter_name_overrides": {
"linear_2.weight": "linear_3.weight"
}
}
]
```
To initialize weights for all the parameters from a pretrained model (assuming their names
remain unchanged), use the following instead:
```
[".*",
{
"type": "pretrained",
"weights_file_path": "best.th",
"parameter_name_overrides": {}
}
]
```
Registered as an `Initializer` with name "pretrained".
# Parameters
weights_file_path : `str`, required
The path to the weights file which has the pretrained model parameters.
parameter_name_overrides : `Dict[str, str]`, optional (default = `None`)
The mapping from the new parameter name to the name which should be used
to index into the pretrained model parameters. If a parameter name is not
specified, the initializer will use the parameter's default name as the key.
"""
def __init__(
self, weights_file_path: str, parameter_name_overrides: Dict[str, str] = None
) -> None:
from allennlp.models.archival import (
extracted_archive,
get_weights_path,
) # import here to avoid circular imports
self.weights: Dict[str, torch.Tensor]
if tarfile.is_tarfile(weights_file_path):
with extracted_archive(weights_file_path) as extraction_path:
self.weights = torch.load(get_weights_path(extraction_path), map_location="cpu")
else:
self.weights = torch.load(weights_file_path, map_location="cpu")
self.parameter_name_overrides = parameter_name_overrides or {}
@overrides
def __call__(self, tensor: torch.Tensor, parameter_name: str, **kwargs) -> None: # type: ignore
# Select the new parameter name if it's being overridden
if parameter_name in self.parameter_name_overrides:
parameter_name = self.parameter_name_overrides[parameter_name]
# If the size of the source and destination tensors are not the
# same, then we need to raise an error
source_weights = self.weights[parameter_name]
if tensor.data.size() != source_weights.size():
raise ConfigurationError(
"Incompatible sizes found for parameter %s. "
"Found %s and %s" % (parameter_name, tensor.data.size(), source_weights.size())
)
# Copy the parameters from the source to the destination
tensor.data.copy_(source_weights.data)
class InitializerApplicator(FromParams):
"""
Applies initializers to the parameters of a Module based on regex matches. Any parameter not
explicitly matching a regex will not be initialized, instead using whatever the default
initialization was in the module's code.
If you are instantiating this object from a config file, an example configuration is as
follows:
```json
{
"regexes": [
["parameter_regex_match1",
{
"type": "normal"
"mean": 0.01
"std": 0.1
}
],
["parameter_regex_match2", "uniform"]
],
"prevent_regexes": ["prevent_init_regex"]
}
```
where the first item in each tuple under the `regexes` parameter is a regex matched against
parameter names, and the second item specifies an `Initializer`. These values can either be strings,
in which case they correspond to the names of initializers, or dictionaries, in which case they
must contain the "type" key, corresponding to the name of an initializer. In addition, they may
contain auxiliary named parameters which will be fed to the initializer itself. To determine
valid auxiliary parameters, please refer to the torch.nn.init documentation.
# Parameters
regexes : `List[Tuple[str, Initializer]]`, optional (default = `[]`)
A list mapping parameter regexes to initializers. We will check each parameter against
each regex in turn, and apply the initializer paired with the first matching regex, if
any.
prevent_regexes : `List[str]`, optional (default = `None`)
Any parameter name matching one of these regexes will not be initialized, regardless of
whether it matches one of the regexes passed in the `regexes` parameter.
"""
def __init__(
self, regexes: List[Tuple[str, Initializer]] = None, prevent_regexes: List[str] = None
) -> None:
self._initializers = regexes or []
self._prevent_regex = None
if prevent_regexes:
self._prevent_regex = "(" + ")|(".join(prevent_regexes) + ")"
def __call__(self, module: torch.nn.Module) -> None:
"""
Applies an initializer to all parameters in a module that match one of the regexes we were
given in this object's constructor. Does nothing to parameters that do not match.
# Parameters
module : `torch.nn.Module`, required.
The Pytorch module to apply the initializers to.
"""
logger.info("Initializing parameters")
unused_regexes = {initializer[0] for initializer in self._initializers}
uninitialized_parameters = set()
# Store which initializers were applied to which parameters.
for name, parameter in module.named_parameters():
for initializer_regex, initializer in self._initializers:
allow = self._prevent_regex is None or not bool(
re.search(self._prevent_regex, name)
)
if allow and re.search(initializer_regex, name):
logger.info("Initializing %s using %s initializer", name, initializer_regex)
initializer(parameter, parameter_name=name)
unused_regexes.discard(initializer_regex)
break
else: # no break
uninitialized_parameters.add(name)
for regex in unused_regexes:
logger.warning("Did not use initialization regex that was passed: %s", regex)
logger.info(
"Done initializing parameters; the following parameters are using their "
"default initialization from their code"
)
uninitialized_parameter_list = list(uninitialized_parameters)
uninitialized_parameter_list.sort()
for name in uninitialized_parameter_list:
logger.info(" %s", name)
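# A minimal illustrative sketch (the module and regexes below are hypothetical), not part
# of the original file: build an applicator programmatically and apply it, so every weight
# matrix gets Xavier-uniform initialization and every bias is zeroed.
if __name__ == "__main__":
    demo_module = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.Linear(8, 2))
    applicator = InitializerApplicator(
        regexes=[
            ("weight", XavierUniformInitializer()),
            ("bias", ZeroInitializer()),
        ]
    )
    applicator(demo_module)  # weights get Xavier-uniform init, biases are zeroed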
| allennlp-master | allennlp/nn/initializers.py |
from typing import List, Set, Tuple, Dict
import numpy
from allennlp.common.checks import ConfigurationError
def decode_mst(
energy: numpy.ndarray, length: int, has_labels: bool = True
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Note: Counter to typical intuition, this function decodes the _maximum_
spanning tree.
Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for
maximum spanning arborescences on graphs.
# Parameters
energy : `numpy.ndarray`, required.
A tensor with shape (num_labels, timesteps, timesteps)
containing the energy of each edge. If has_labels is `False`,
the tensor should have shape (timesteps, timesteps) instead.
length : `int`, required.
The length of this sequence, as the energy may have come
from a padded batch.
has_labels : `bool`, optional, (default = `True`)
Whether the graph has labels or not.
"""
if has_labels and energy.ndim != 3:
raise ConfigurationError("The dimension of the energy array is not equal to 3.")
elif not has_labels and energy.ndim != 2:
raise ConfigurationError("The dimension of the energy array is not equal to 2.")
input_shape = energy.shape
max_length = input_shape[-1]
# Our energy matrix might have been batched -
# here we clip it to contain only non padded tokens.
if has_labels:
energy = energy[:, :length, :length]
# get best label for each edge.
label_id_matrix = energy.argmax(axis=0)
energy = energy.max(axis=0)
else:
energy = energy[:length, :length]
label_id_matrix = None
# get original score matrix
original_score_matrix = energy
# initialize score matrix to original score matrix
score_matrix = numpy.array(original_score_matrix, copy=True)
old_input = numpy.zeros([length, length], dtype=numpy.int32)
old_output = numpy.zeros([length, length], dtype=numpy.int32)
current_nodes = [True for _ in range(length)]
representatives: List[Set[int]] = []
for node1 in range(length):
original_score_matrix[node1, node1] = 0.0
score_matrix[node1, node1] = 0.0
representatives.append({node1})
for node2 in range(node1 + 1, length):
old_input[node1, node2] = node1
old_output[node1, node2] = node2
old_input[node2, node1] = node2
old_output[node2, node1] = node1
final_edges: Dict[int, int] = {}
# The main algorithm operates inplace.
chu_liu_edmonds(
length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives
)
heads = numpy.zeros([max_length], numpy.int32)
if has_labels:
head_type = numpy.ones([max_length], numpy.int32)
else:
head_type = None
for child, parent in final_edges.items():
heads[child] = parent
if has_labels:
head_type[child] = label_id_matrix[parent, child]
return heads, head_type
def chu_liu_edmonds(
length: int,
score_matrix: numpy.ndarray,
current_nodes: List[bool],
final_edges: Dict[int, int],
old_input: numpy.ndarray,
old_output: numpy.ndarray,
representatives: List[Set[int]],
):
"""
Applies the Chu-Liu-Edmonds algorithm recursively
to a graph with edge weights defined by score_matrix.
Note that this function operates in place, so variables
will be modified.
# Parameters
length : `int`, required.
The number of nodes.
score_matrix : `numpy.ndarray`, required.
The score matrix representing the scores for pairs
of nodes.
current_nodes : `List[bool]`, required.
The nodes which are representatives in the graph.
A representative at its most basic represents a node,
but as the algorithm progresses, individual nodes will
represent collapsed cycles in the graph.
final_edges : `Dict[int, int]`, required.
An empty dictionary which will be populated with the
nodes which are connected in the maximum spanning tree.
old_input : `numpy.ndarray`, required.
old_output : `numpy.ndarray`, required.
representatives : `List[Set[int]]`, required.
A list containing the nodes that a particular node
is representing at this iteration in the graph.
# Returns
Nothing - all variables are modified in place.
"""
# Set the initial graph to be the greedy best one.
parents = [-1]
for node1 in range(1, length):
parents.append(0)
if current_nodes[node1]:
max_score = score_matrix[0, node1]
for node2 in range(1, length):
if node2 == node1 or not current_nodes[node2]:
continue
new_score = score_matrix[node2, node1]
if new_score > max_score:
max_score = new_score
parents[node1] = node2
# Check if this solution has a cycle.
has_cycle, cycle = _find_cycle(parents, length, current_nodes)
# If there are no cycles, find all edges and return.
if not has_cycle:
final_edges[0] = -1
for node in range(1, length):
if not current_nodes[node]:
continue
parent = old_input[parents[node], node]
child = old_output[parents[node], node]
final_edges[child] = parent
return
# Otherwise, we have a cycle so we need to remove an edge.
# From here until the recursive call is the contraction stage of the algorithm.
cycle_weight = 0.0
# Find the weight of the cycle.
index = 0
for node in cycle:
index += 1
cycle_weight += score_matrix[parents[node], node]
# For each node in the graph, find the maximum weight incoming
# and outgoing edge into the cycle.
cycle_representative = cycle[0]
for node in range(length):
if not current_nodes[node] or node in cycle:
continue
in_edge_weight = float("-inf")
in_edge = -1
out_edge_weight = float("-inf")
out_edge = -1
for node_in_cycle in cycle:
if score_matrix[node_in_cycle, node] > in_edge_weight:
in_edge_weight = score_matrix[node_in_cycle, node]
in_edge = node_in_cycle
# Add the new edge score to the cycle weight
# and subtract the edge we're considering removing.
score = (
cycle_weight
+ score_matrix[node, node_in_cycle]
- score_matrix[parents[node_in_cycle], node_in_cycle]
)
if score > out_edge_weight:
out_edge_weight = score
out_edge = node_in_cycle
score_matrix[cycle_representative, node] = in_edge_weight
old_input[cycle_representative, node] = old_input[in_edge, node]
old_output[cycle_representative, node] = old_output[in_edge, node]
score_matrix[node, cycle_representative] = out_edge_weight
old_output[node, cycle_representative] = old_output[node, out_edge]
old_input[node, cycle_representative] = old_input[node, out_edge]
# For the next recursive iteration, we want to consider the cycle as a
# single node. Here we collapse the cycle into the first node in the
cycle (the first node is arbitrary) and set all the other nodes not to be
# considered in the next iteration. We also keep track of which
# representatives we are considering this iteration because we need
# them below to check if we're done.
considered_representatives: List[Set[int]] = []
for i, node_in_cycle in enumerate(cycle):
considered_representatives.append(set())
if i > 0:
# We need to consider at least one
# node in the cycle, arbitrarily choose
# the first.
current_nodes[node_in_cycle] = False
for node in representatives[node_in_cycle]:
considered_representatives[i].add(node)
if i > 0:
representatives[cycle_representative].add(node)
chu_liu_edmonds(
length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives
)
# Expansion stage.
# check each node in cycle, if one of its representatives
# is a key in the final_edges, it is the one we need.
found = False
key_node = -1
for i, node in enumerate(cycle):
for cycle_rep in considered_representatives[i]:
if cycle_rep in final_edges:
key_node = node
found = True
break
if found:
break
previous = parents[key_node]
while previous != key_node:
child = old_output[parents[previous], previous]
parent = old_input[parents[previous], previous]
final_edges[child] = parent
previous = parents[previous]
def _find_cycle(
parents: List[int], length: int, current_nodes: List[bool]
) -> Tuple[bool, List[int]]:
added = [False for _ in range(length)]
added[0] = True
cycle = set()
has_cycle = False
for i in range(1, length):
if has_cycle:
break
# don't redo nodes we've already
# visited or aren't considering.
if added[i] or not current_nodes[i]:
continue
# Initialize a new possible cycle.
this_cycle = set()
this_cycle.add(i)
added[i] = True
has_cycle = True
next_node = i
while parents[next_node] not in this_cycle:
next_node = parents[next_node]
# If we see a node we've already processed,
# we can stop, because the node we are
# processing would have been in that cycle.
if added[next_node]:
has_cycle = False
break
added[next_node] = True
this_cycle.add(next_node)
if has_cycle:
original = next_node
cycle.add(original)
next_node = parents[original]
while next_node != original:
cycle.add(next_node)
next_node = parents[next_node]
break
return has_cycle, list(cycle)
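# A minimal illustrative sketch (the scores are made up), not part of the original module:
# decode an unlabeled 3-node graph where node 0 is the artificial root and energy[head, child]
# is the score of attaching `child` to `head`.
if __name__ == "__main__":
    energy = numpy.array(
        [
            [0.0, 9.0, 1.0],
            [0.0, 0.0, 8.0],
            [0.0, 2.0, 0.0],
        ]
    )
    heads, _ = decode_mst(energy, length=3, has_labels=False)
    # Node 1 attaches to the root (heads[1] == 0) and node 2 attaches to node 1 (heads[2] == 1).
    print(heads)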
| allennlp-master | allennlp/nn/chu_liu_edmonds.py |
import re
from typing import List, Tuple
import torch
from allennlp.common import FromParams
from allennlp.nn.regularizers.regularizer import Regularizer
class RegularizerApplicator(FromParams):
"""
Applies regularizers to the parameters of a Module based on regex matches.
"""
def __init__(self, regexes: List[Tuple[str, Regularizer]] = None) -> None:
"""
# Parameters
regexes : `List[Tuple[str, Regularizer]]`, optional (default = `None`)
A sequence of pairs (regex, Regularizer), where each Regularizer
applies to the parameters its regex matches (and that haven't previously
been matched).
"""
self._regularizers = regexes or []
def __call__(self, module: torch.nn.Module) -> torch.Tensor:
"""
# Parameters
module : `torch.nn.Module`, required
The module to regularize.
"""
accumulator = 0.0
for name, parameter in module.named_parameters():
# We first check if the parameter needs gradient updates or not
if parameter.requires_grad:
# For each parameter find the first matching regex.
for regex, regularizer in self._regularizers:
if re.search(regex, name):
penalty = regularizer(parameter)
accumulator = accumulator + penalty
break
return accumulator
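# A minimal illustrative sketch (the module and regex are hypothetical), not part of the
# original file: apply an L2 penalty to every parameter whose name matches "weight"; the
# bias is left unregularized because no regex matches it.
if __name__ == "__main__":
    from allennlp.nn.regularizers.regularizers import L2Regularizer

    model = torch.nn.Linear(4, 3)
    applicator = RegularizerApplicator(regexes=[("weight", L2Regularizer(alpha=1e-4))])
    penalty = applicator(model)  # a scalar tensor: 1e-4 * sum(weight ** 2)
    print(float(penalty))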
| allennlp-master | allennlp/nn/regularizers/regularizer_applicator.py |
"""
This module contains classes representing regularization schemes
as well as a class for applying regularization to parameters.
"""
from allennlp.nn.regularizers.regularizer import Regularizer
from allennlp.nn.regularizers.regularizers import L1Regularizer
from allennlp.nn.regularizers.regularizers import L2Regularizer
from allennlp.nn.regularizers.regularizer_applicator import RegularizerApplicator
| allennlp-master | allennlp/nn/regularizers/__init__.py |
import torch
from allennlp.nn.regularizers.regularizer import Regularizer
@Regularizer.register("l1")
class L1Regularizer(Regularizer):
"""
Represents a penalty proportional to the sum of the absolute values of the parameters
Registered as a `Regularizer` with name "l1".
"""
def __init__(self, alpha: float = 0.01) -> None:
self.alpha = alpha
def __call__(self, parameter: torch.Tensor) -> torch.Tensor:
return self.alpha * torch.sum(torch.abs(parameter))
@Regularizer.register("l2")
class L2Regularizer(Regularizer):
"""
Represents a penalty proportional to the sum of squared values of the parameters
Registered as a `Regularizer` with name "l2".
"""
def __init__(self, alpha: float = 0.01) -> None:
self.alpha = alpha
def __call__(self, parameter: torch.Tensor) -> torch.Tensor:
return self.alpha * torch.sum(torch.pow(parameter, 2))
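# A minimal illustrative sketch (the parameter values are made up), not part of the
# original module: with a (3, 4) parameter filled with 2.0, the L1 penalty is
# 0.01 * 24 = 0.24 and the L2 penalty is 0.01 * 48 = 0.48.
if __name__ == "__main__":
    parameter = torch.full((3, 4), 2.0)
    print(L1Regularizer(alpha=0.01)(parameter))  # tensor(0.2400)
    print(L2Regularizer(alpha=0.01)(parameter))  # tensor(0.4800)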
| allennlp-master | allennlp/nn/regularizers/regularizers.py |
import torch
from allennlp.common import Registrable
class Regularizer(Registrable):
"""
An abstract class representing a regularizer. It must implement
`__call__`, returning a scalar tensor.
"""
default_implementation = "l2"
def __call__(self, parameter: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
| allennlp-master | allennlp/nn/regularizers/regularizer.py |
from typing import Optional, Iterable, Dict, Any
from allennlp.common.checks import ConfigurationError
class MetricTracker:
"""
This class tracks a metric during training for the dual purposes of early stopping
and for knowing whether the current value is the best so far. It mimics the PyTorch
`state_dict` / `load_state_dict` interface, so that it can be checkpointed along with
your model and optimizer.
Some metrics improve by increasing; others by decreasing. Here you can either explicitly
supply `should_decrease`, or you can provide a `metric_name` in which case "should decrease"
is inferred from the first character, which must be "+" or "-".
# Parameters
patience : `int`, optional (default = `None`)
If provided, then `should_stop_early()` returns True if we go this
many epochs without seeing a new best value.
metric_name : `str`, optional (default = `None`)
If provided, it's used to infer whether we expect the metric values to
increase (if it starts with "+") or decrease (if it starts with "-").
It's an error if it doesn't start with one of those. If it's not provided,
you should specify `should_decrease` instead.
should_decrease : `bool`, optional (default = `None`)
If `metric_name` isn't provided (in which case we can't infer `should_decrease`),
then you have to specify it here.
"""
def __init__(
self, patience: Optional[int] = None, metric_name: str = None, should_decrease: bool = None
) -> None:
self._best_so_far: Optional[float] = None
self._patience = patience
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self.best_epoch_metrics: Dict[str, float] = {}
self._epoch_number = 0
self.best_epoch: Optional[int] = None
# If the metric name starts with "+", we want it to increase.
# If the metric name starts with "-", we want it to decrease.
# We also allow you to not specify a metric name and just set `should_decrease` directly.
if should_decrease is not None and metric_name is not None:
raise ConfigurationError(
"must specify either `should_decrease` or `metric_name` (but not both)"
)
elif metric_name is not None:
if metric_name[0] == "-":
self._should_decrease = True
elif metric_name[0] == "+":
self._should_decrease = False
else:
raise ConfigurationError("metric_name must start with + or -")
elif should_decrease is not None:
self._should_decrease = should_decrease
else:
raise ConfigurationError(
"must specify either `should_decrease` or `metric_name` (but not both)"
)
def clear(self) -> None:
"""
Clears out the tracked metrics, but keeps the patience and should_decrease settings.
"""
self._best_so_far = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch = None
def state_dict(self) -> Dict[str, Any]:
"""
A `Trainer` can use this to serialize the state of the metric tracker.
"""
return {
"best_so_far": self._best_so_far,
"patience": self._patience,
"epochs_with_no_improvement": self._epochs_with_no_improvement,
"is_best_so_far": self._is_best_so_far,
"should_decrease": self._should_decrease,
"best_epoch_metrics": self.best_epoch_metrics,
"epoch_number": self._epoch_number,
"best_epoch": self.best_epoch,
}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""
A `Trainer` can use this to hydrate a metric tracker from a serialized state.
"""
self._best_so_far = state_dict["best_so_far"]
self._patience = state_dict["patience"]
self._epochs_with_no_improvement = state_dict["epochs_with_no_improvement"]
self._is_best_so_far = state_dict["is_best_so_far"]
self._should_decrease = state_dict["should_decrease"]
self.best_epoch_metrics = state_dict["best_epoch_metrics"]
self._epoch_number = state_dict["epoch_number"]
self.best_epoch = state_dict["best_epoch"]
def add_metric(self, metric: float) -> None:
"""
Record a new value of the metric and update the various things that depend on it.
"""
new_best = (
(self._best_so_far is None)
or (self._should_decrease and metric < self._best_so_far)
or (not self._should_decrease and metric > self._best_so_far)
)
if new_best:
self.best_epoch = self._epoch_number
self._is_best_so_far = True
self._best_so_far = metric
self._epochs_with_no_improvement = 0
else:
self._is_best_so_far = False
self._epochs_with_no_improvement += 1
self._epoch_number += 1
def add_metrics(self, metrics: Iterable[float]) -> None:
"""
Helper to add multiple metrics at once.
"""
for metric in metrics:
self.add_metric(metric)
def is_best_so_far(self) -> bool:
"""
Returns true if the most recent value of the metric is the best so far.
"""
return self._is_best_so_far
def should_stop_early(self) -> bool:
"""
Returns true if improvement has stopped for long enough.
"""
if self._patience is None:
return False
else:
return self._epochs_with_no_improvement >= self._patience
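# A minimal illustrative sketch (the loss values are made up), not part of the original
# module: track a metric that should decrease ("-loss") with a patience of 2. After the
# best value of 0.8, two epochs pass without improvement, so should_stop_early() turns True.
if __name__ == "__main__":
    tracker = MetricTracker(patience=2, metric_name="-loss")
    for loss in [1.0, 0.8, 0.9, 0.85]:
        tracker.add_metric(loss)
        print(loss, tracker.is_best_so_far(), tracker.should_stop_early())
    # 1.0  True  False
    # 0.8  True  False
    # 0.9  False False
    # 0.85 False True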
| allennlp-master | allennlp/training/metric_tracker.py |
import os
from contextlib import contextmanager
from typing import Any, Dict, Iterator, Tuple
from allennlp.models import Model
from allennlp.training.checkpointer import Checkpointer
from allennlp.training.trainer import Trainer
@Trainer.register("no_op")
class NoOpTrainer(Trainer):
"""
Registered as a `Trainer` with name "no_op".
"""
def __init__(self, serialization_dir: str, model: Model) -> None:
"""
A trivial trainer to assist in making model archives for models that do not actually
require training. For instance, a majority class baseline.
In a typical AllenNLP configuration file, neither the `serialization_dir` nor the `model`
arguments would need an entry.
"""
super().__init__(serialization_dir, cuda_device=-1)
self.model = model
def train(self) -> Dict[str, Any]:
assert self._serialization_dir is not None
self.model.vocab.save_to_files(os.path.join(self._serialization_dir, "vocabulary"))
checkpointer = Checkpointer(self._serialization_dir)
checkpointer.save_checkpoint(epoch=0, trainer=self, is_best_so_far=True)
return {}
@contextmanager
def get_checkpoint_state(self) -> Iterator[Tuple[Dict[str, Any], Dict[str, Any]]]:
yield self.model.state_dict(), {}
| allennlp-master | allennlp/training/no_op_trainer.py |