Source code for transformers.generation_stopping_criteria

import time
import warnings
from abc import ABC
from typing import Optional

import torch

from .file_utils import add_start_docstrings


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using :class:`~transformers.BertTokenizer`. See
            :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
            details.

            `What are input IDs? <../glossary.html#input-ids>`__
        scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs:
            Additional stopping-criteria-specific kwargs.

    Return:
        :obj:`bool`. :obj:`False` indicates we should continue, :obj:`True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")
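The only contract a concrete criterion has to honor is the :obj:`__call__` signature above. As a minimal sketch (the class name and its ``stop_token_id`` argument are hypothetical, not part of this module), a criterion that stops as soon as any sequence in the batch has just generated a chosen token could look like this:

class StopOnTokenCriteria(StoppingCriteria):
    """Hypothetical example: stop when any sequence in the batch just generated ``stop_token_id``."""

    def __init__(self, stop_token_id: int):
        self.stop_token_id = stop_token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # input_ids is (batch_size, sequence_length); only the most recent token matters here.
        return bool((input_ids[:, -1] == self.stop_token_id).any())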
class MaxLengthCriteria(StoppingCriteria):
    """
    This class can be used to stop generation whenever the full generated number of tokens exceeds :obj:`max_length`.
    Keep in mind that for decoder-only transformers this will include the initial prompted tokens.

    Args:
        max_length (:obj:`int`):
            The maximum length that the output sequence can have in number of tokens.
    """

    def __init__(self, max_length: int):
        self.max_length = max_length
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] > self.max_length
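For instance, with ``max_length=10`` the criterion stays :obj:`False` until the sequence dimension goes past 10 tokens; since ``scores`` is ignored here, :obj:`None` can be passed in this illustrative check (not part of the module):

criterion = MaxLengthCriteria(max_length=10)
assert not criterion(torch.zeros((1, 5), dtype=torch.long), scores=None)
assert criterion(torch.zeros((1, 11), dtype=torch.long), scores=None)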
class MaxTimeCriteria(StoppingCriteria):
    """
    This class can be used to stop generation whenever the full generation exceeds some amount of time. By default,
    the time starts being counted when you initialize this object. You can override this by passing an
    :obj:`initial_timestamp`.

    Args:
        max_time (:obj:`float`):
            The maximum allowed time in seconds for the generation.
        initial_timestamp (:obj:`float`, `optional`, defaults to :obj:`time.time()`):
            The start of the generation allowed time.
    """

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
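Because the clock starts at construction time by default, passing an explicit ``initial_timestamp`` can be useful when the criterion is built well before generation actually starts. A small illustrative sketch (the 30-second budget is arbitrary):

generation_start = time.time()
criterion = MaxTimeCriteria(max_time=30.0, initial_timestamp=generation_start)
# False until 30 seconds have elapsed since `generation_start`, then True.
criterion(torch.zeros((1, 1), dtype=torch.long), scores=None)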
class StoppingCriteriaList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)
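Criteria of different types can be combined in one list; the call returns :obj:`True` as soon as any member fires. A minimal usage sketch (the concrete limits are arbitrary, and ``scores`` is again unused by these criteria):

criteria = StoppingCriteriaList([
    MaxLengthCriteria(max_length=50),
    MaxTimeCriteria(max_time=10.0),
])
should_stop = criteria(torch.zeros((2, 8), dtype=torch.long), scores=None)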
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int):
    found = False
    for stopping_criterium in stopping_criteria:
        if isinstance(stopping_criterium, MaxLengthCriteria):
            found = True
            if stopping_criterium.max_length != max_length:
                warnings.warn(
                    "You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning
                )
    if not found:
        stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
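For example, validating a list that contains no :obj:`MaxLengthCriteria` appends one, while a mismatching ``max_length`` only raises a warning and keeps the existing criterion (illustrative values):

criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=10.0)])
validate_stopping_criteria(criteria, max_length=20)
# `criteria` now also contains a MaxLengthCriteria(max_length=20).

mismatched = StoppingCriteriaList([MaxLengthCriteria(max_length=30)])
validate_stopping_criteria(mismatched, max_length=20)  # warns; the max_length=30 criterion is kept.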