Dataset schema (one row per source-file chunk):
- text: string, length 7 to 318k
- id: string, length 14 to 166
- metadata: dict
- __index_level_0__: int64, range 0 to 439
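For orientation, here is a minimal sketch of how rows with this schema would typically be read with the `datasets` library. The dataset id `"org/code-dataset"` is a hypothetical placeholder (the real name is not given here); the column names and the `metadata` keys follow the schema and the example row below.

```python
# Minimal sketch, assuming rows follow the schema above.
# "org/code-dataset" is a hypothetical placeholder id; substitute the actual dataset name.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train")

row = ds[0]
print(row["id"])                     # source-file path plus a chunk suffix, e.g. ".../image_processing_owlv2.py/0"
print(row["metadata"]["file_path"])  # metadata dict with file_path, repo_id, token_count
print(len(row["text"]))              # raw file text, anywhere from 7 to ~318k characters
print(row["__index_level_0__"])      # integer index in [0, 439]
```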
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for OWLv2.""" import warnings from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import ( center_to_corners_format, pad, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import ( TensorType, is_scipy_available, is_torch_available, is_vision_available, logging, requires_backends, ) if is_torch_available(): import torch if is_vision_available(): import PIL if is_scipy_available(): from scipy import ndimage as ndi logger = logging.get_logger(__name__) # Copied from transformers.models.owlvit.image_processing_owlvit._upcast def _upcast(t): # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() # Copied from transformers.models.owlvit.image_processing_owlvit.box_area def box_area(boxes): """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. Returns: `torch.FloatTensor`: a tensor containing the area for each box. """ boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # Copied from transformers.models.owlvit.image_processing_owlvit.box_iou def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union def _preprocess_resize_output_shape(image, output_shape): """Validate resize output shape according to input image. Args: image (`np.ndarray`): Image to be resized. output_shape (`iterable`): Size of the generated output image `(rows, cols[, ...][, dim])`. If `dim` is not provided, the number of channels is preserved. Returns image (`np.ndarray): The input image, but with additional singleton dimensions appended in the case where `len(output_shape) > input.ndim`. output_shape (`Tuple`): The output shape converted to tuple. Raises ------ ValueError: If output_shape length is smaller than the image number of dimensions. 
Notes ----- The input image is reshaped if its number of dimensions is not equal to output_shape_length. """ output_shape = tuple(output_shape) output_ndim = len(output_shape) input_shape = image.shape if output_ndim > image.ndim: # append dimensions to input_shape input_shape += (1,) * (output_ndim - image.ndim) image = np.reshape(image, input_shape) elif output_ndim == image.ndim - 1: # multichannel case: append shape of last axis output_shape = output_shape + (image.shape[-1],) elif output_ndim < image.ndim: raise ValueError("output_shape length cannot be smaller than the " "image number of dimensions") return image, output_shape def _clip_warp_output(input_image, output_image): """Clip output image to range of values of input image. Note that this function modifies the values of *output_image* in-place. Taken from: https://github.com/scikit-image/scikit-image/blob/b4b521d6f0a105aabeaa31699949f78453ca3511/skimage/transform/_warps.py#L640. Args: input_image : ndarray Input image. output_image : ndarray Output image, which is modified in-place. """ min_val = np.min(input_image) if np.isnan(min_val): # NaNs detected, use NaN-safe min/max min_func = np.nanmin max_func = np.nanmax min_val = min_func(input_image) else: min_func = np.min max_func = np.max max_val = max_func(input_image) output_image = np.clip(output_image, min_val, max_val) return output_image class Owlv2ImageProcessor(BaseImageProcessor): r""" Constructs an OWLv2 image processor. Args: do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overriden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overriden by `rescale_factor` in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image to a square with gray pixels on the bottom and the right. Can be overriden by `do_pad` in the `preprocess` method. do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overriden by `do_resize` in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"height": 960, "width": 960}`): Size to resize the image to. Can be overriden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling method to use if resizing the image. Can be overriden by `resample` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values"] def __init__( self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.do_resize = do_resize self.size = size if size is not None else {"height": 960, "width": 960} self.resample = resample self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD def pad( self, image: np.array, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Pad an image to a square with gray pixels on the bottom and the right, as per the original OWLv2 implementation. Args: image (`np.ndarray`): Image to pad. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ height, width = get_image_size(image) size = max(height, width) image = pad( image=image, padding=((0, size - height), (0, size - width)), constant_values=0.5, data_format=data_format, input_data_format=input_data_format, ) return image def resize( self, image: np.ndarray, size: Dict[str, int], anti_aliasing: bool = True, anti_aliasing_sigma=None, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image as per the original implementation. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary containing the height and width to resize the image to. anti_aliasing (`bool`, *optional*, defaults to `True`): Whether to apply anti-aliasing when downsampling the image. anti_aliasing_sigma (`float`, *optional*, defaults to `None`): Standard deviation for Gaussian kernel when downsampling the image. If `None`, it will be calculated automatically. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. 
""" requires_backends(self, "scipy") output_shape = (size["height"], size["width"]) image = to_channel_dimension_format(image, ChannelDimension.LAST) image, output_shape = _preprocess_resize_output_shape(image, output_shape) input_shape = image.shape factors = np.divide(input_shape, output_shape) # Translate modes used by np.pad to those used by scipy.ndimage ndi_mode = "mirror" cval = 0 order = 1 if anti_aliasing: if anti_aliasing_sigma is None: anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2) else: anti_aliasing_sigma = np.atleast_1d(anti_aliasing_sigma) * np.ones_like(factors) if np.any(anti_aliasing_sigma < 0): raise ValueError("Anti-aliasing standard deviation must be " "greater than or equal to zero") elif np.any((anti_aliasing_sigma > 0) & (factors <= 1)): warnings.warn( "Anti-aliasing standard deviation greater than zero but " "not down-sampling along all axes" ) filtered = ndi.gaussian_filter(image, anti_aliasing_sigma, cval=cval, mode=ndi_mode) else: filtered = image zoom_factors = [1 / f for f in factors] out = ndi.zoom(filtered, zoom_factors, order=order, mode=ndi_mode, cval=cval, grid_mode=True) image = _clip_warp_output(image, out) image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST) image = ( to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image ) return image def preprocess( self, images: ImageInput, do_pad: bool = None, do_resize: bool = None, size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image to a square with gray pixels on the bottom and the right. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size to resize the image to. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. 
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_pad = do_pad if do_pad is not None else self.do_pad do_resize = do_resize if do_resize is not None else self.do_resize do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_pad: images = [self.pad(image=image, input_data_format=input_data_format) for image in images] if do_resize: images = [ self.resize( image=image, size=size, input_data_format=input_data_format, ) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) # Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_object_detection def post_process_object_detection( self, outputs, threshold: float = 0.1, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ # TODO: (amy) add support for other frameworks logits, boxes = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) probs = torch.max(logits, dim=-1) scores = torch.sigmoid(probs.values) labels = probs.indices # Convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(boxes) # Convert from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results # Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_image_guided_detection def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None): """ Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`OwlViTImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. 
target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. All labels are set to None as `OwlViTForObjectDetection.image_guided_detection` perform one-shot object detection. """ logits, target_boxes = outputs.logits, outputs.target_pred_boxes if len(logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") probs = torch.max(logits, dim=-1) scores = torch.sigmoid(probs.values) # Convert to [x0, y0, x1, y1] format target_boxes = center_to_corners_format(target_boxes) # Apply non-maximum suppression (NMS) if nms_threshold < 1.0: for idx in range(target_boxes.shape[0]): for i in torch.argsort(-scores[idx]): if not scores[idx][i]: continue ious = box_iou(target_boxes[idx][i, :].unsqueeze(0), target_boxes[idx])[0][0] ious[i] = -1.0 # Mask self-IoU. scores[idx][ious > nms_threshold] = 0.0 # Convert from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(target_boxes.device) target_boxes = target_boxes * scale_fct[:, None, :] # Compute box display alphas based on prediction scores results = [] alphas = torch.zeros_like(scores) for idx in range(target_boxes.shape[0]): # Select scores for boxes matching the current query: query_scores = scores[idx] if not query_scores.nonzero().numel(): continue # Apply threshold on scores before scaling query_scores[query_scores < threshold] = 0.0 # Scale box alpha such that the best box for each query has alpha 1.0 and the worst box has alpha 0.1. # All other boxes will either belong to a different query, or will not be shown. max_score = torch.max(query_scores) + 1e-6 query_alphas = (query_scores - (max_score * 0.1)) / (max_score * 0.9) query_alphas = torch.clip(query_alphas, 0.0, 1.0) alphas[idx] = query_alphas mask = alphas[idx] > 0 box_scores = alphas[idx][mask] boxes = target_boxes[idx][mask] results.append({"scores": box_scores, "labels": None, "boxes": boxes}) return results
id: transformers/src/transformers/models/owlv2/image_processing_owlv2.py/0
metadata: { "file_path": "transformers/src/transformers/models/owlv2/image_processing_owlv2.py", "repo_id": "transformers", "token_count": 11225 }
__index_level_0__: 352
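To show how the image processor defined in the file above is typically used end to end, here is a minimal, hedged usage sketch pairing it with `Owlv2ForObjectDetection`. The checkpoint name (`google/owlv2-base-patch16-ensemble`), the image URL, the query texts and the score threshold are illustrative assumptions, not taken from the file itself.

```python
# Minimal usage sketch (checkpoint, image URL, queries and threshold are illustrative).
import requests
import torch
from PIL import Image
from transformers import Owlv2ForObjectDetection, Owlv2Processor

processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
texts = [["a photo of a cat", "a photo of a dog"]]

# The processor wraps Owlv2ImageProcessor.preprocess: rescale -> pad to square -> resize to 960x960 -> normalize.
inputs = processor(text=texts, images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# post_process_object_detection (defined in the file above) converts relative (cx, cy, w, h) boxes
# into absolute (x0, y0, x1, y1) coordinates. Since OWLv2 pads the image to a square before resizing,
# the boxes are relative to the padded image; using the unpadded size here is a common approximation.
target_sizes = torch.tensor([image.size[::-1]])
results = processor.image_processor.post_process_object_detection(
    outputs, threshold=0.1, target_sizes=target_sizes
)
for score, label, box in zip(results[0]["scores"], results[0]["labels"], results[0]["boxes"]):
    print(f"{texts[0][label.item()]}: score={score.item():.2f}, box={[round(v, 1) for v in box.tolist()]}")
```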
# coding=utf-8 # Copyright 2021 Deepmind and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Perceiver model.""" import abc import math from dataclasses import dataclass from functools import reduce from operator import __add__ from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_perceiver import PerceiverConfig ModalitySizeType = Mapping[str, int] PreprocessorOutputType = Tuple[torch.Tensor, Optional[torch.Tensor], torch.Tensor] PreprocessorType = Callable[..., PreprocessorOutputType] PostprocessorType = Callable[..., Any] logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "deepmind/language-perceiver" _CONFIG_FOR_DOC = "PerceiverConfig" PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "deepmind/language-perceiver", # See all Perceiver models at https://huggingface.co/models?filter=perceiver ] @dataclass class PerceiverModelOutput(ModelOutput): """ Base class for Perceiver base model's outputs, with potential hidden states, attentions and cross-attentions. Args: logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ logits: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class PerceiverDecoderOutput(ModelOutput): """ Base class for Perceiver decoder outputs, with potential cross-attentions. Args: logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`): Output of the basic decoder. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ logits: torch.FloatTensor = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class PerceiverMaskedLMOutput(ModelOutput): """ Base class for Perceiver's masked language model outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_latents, num_latents)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class PerceiverClassifierOutput(ModelOutput): """ Base class for Perceiver's outputs of sequence/image classification models, optical flow and multimodal autoencoding. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). 
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None class PerceiverEmbeddings(nn.Module): """Construct the latent embeddings.""" def __init__(self, config): super().__init__() self.latents = nn.Parameter(torch.randn(config.num_latents, config.d_latents)) def forward(self, batch_size: int): return self.latents.expand(batch_size, -1, -1) # Thanks, Phil Wang class PerceiverSelfAttention(nn.Module): """Multi-headed {cross, self}-attention. Can be used both in the encoder as well as in the decoder.""" def __init__( self, config, is_cross_attention=False, qk_channels=None, v_channels=None, num_heads=1, q_dim=None, kv_dim=None, ): super().__init__() self.num_heads = num_heads # Q and K must have the same number of channels. # Default to preserving Q's input's shape. if qk_channels is None: qk_channels = q_dim # V's num_channels determines the shape of the output of QKV-attention. # Default to the same number of channels used in the key-query operation. 
if v_channels is None: v_channels = qk_channels if qk_channels % num_heads != 0: raise ValueError(f"qk_channels ({qk_channels}) must be divisible by num_heads ({num_heads}).") if v_channels % num_heads != 0: raise ValueError(f"v_channels ({v_channels}) must be divisible by num_heads ({num_heads}).") self.qk_channels = qk_channels self.v_channels = v_channels self.qk_channels_per_head = self.qk_channels // num_heads self.v_channels_per_head = self.v_channels // num_heads # Layer normalization self.layernorm1 = nn.LayerNorm(q_dim) self.layernorm2 = nn.LayerNorm(kv_dim) if is_cross_attention else nn.Identity() # Projection matrices self.query = nn.Linear(q_dim, qk_channels) self.key = nn.Linear(kv_dim, qk_channels) self.value = nn.Linear(kv_dim, v_channels) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x, channels_per_head): new_x_shape = x.size()[:-1] + (self.num_heads, channels_per_head) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs: Optional[torch.FloatTensor] = None, inputs_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: hidden_states = self.layernorm1(hidden_states) inputs = self.layernorm2(inputs) # Project queries, keys and values to a common feature dimension. If this is instantiated as a cross-attention module, # the keys and values come from the inputs; the attention mask needs to be such that the inputs's non-relevant tokens are not attended to. is_cross_attention = inputs is not None queries = self.query(hidden_states) if is_cross_attention: keys = self.key(inputs) values = self.value(inputs) attention_mask = inputs_mask else: keys = self.key(hidden_states) values = self.value(hidden_states) # Reshape channels for multi-head attention. # We reshape from (batch_size, time, channels) to (batch_size, num_heads, time, channels per head) queries = self.transpose_for_scores(queries, self.qk_channels_per_head) keys = self.transpose_for_scores(keys, self.qk_channels_per_head) values = self.transpose_for_scores(values, self.v_channels_per_head) # Take the dot product between the queries and keys to get the raw attention scores. attention_scores = torch.matmul(queries, keys.transpose(-1, -2)) batch_size, num_heads, seq_len, q_head_dim = queries.shape _, _, _, v_head_dim = values.shape hiddens = self.num_heads * v_head_dim attention_scores = attention_scores / math.sqrt(q_head_dim) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in PerceiverModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, values) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (hiddens,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class PerceiverSelfOutput(nn.Module): def __init__(self, config, input_channels, output_channels): super().__init__() self.dense = nn.Linear(input_channels, output_channels) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) return hidden_states class PerceiverAttention(nn.Module): """Attention module, including a dense block.""" def __init__( self, config, is_cross_attention=False, qk_channels=None, v_channels=None, num_heads=1, q_dim=None, kv_dim=None, use_query_residual=True, ): super().__init__() # MultiHead attention if is_cross_attention and qk_channels is None: if config.cross_attention_shape_for_attention == "q": qk_channels = q_dim elif config.cross_attention_shape_for_attention == "kv": qk_channels = kv_dim else: raise ValueError( f"Unknown value {config.cross_attention_shape_for_attention} for " "cross_attention_shape_for_attention." ) else: if qk_channels is None: qk_channels = q_dim if v_channels is None: v_channels = qk_channels self.self = PerceiverSelfAttention( config, is_cross_attention=is_cross_attention, qk_channels=qk_channels, v_channels=v_channels, num_heads=num_heads, q_dim=q_dim, kv_dim=kv_dim, ) # dense block output_channels = None if is_cross_attention: output_channels = q_dim else: if output_channels is None: output_channels = v_channels self.output = PerceiverSelfOutput(config, input_channels=self.self.v_channels, output_channels=output_channels) self.use_query_residual = use_query_residual self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs: Optional[torch.FloatTensor] = None, inputs_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, inputs, inputs_mask, output_attentions, ) # Output projection attention_output = self.output(self_outputs[0]) # Optionally include a residual to the original queries. # Consider omitting the residual if the semantics of query and output # are different, e.g. if queries are positions and outputs are pixels. 
if self.use_query_residual: attention_output = attention_output + hidden_states outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class PerceiverMLP(nn.Module): """A Transformer-style dense module to follow attention.""" def __init__(self, config, input_size, widening_factor): super().__init__() self.dense1 = nn.Linear(input_size, widening_factor * input_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.dense2 = nn.Linear(widening_factor * input_size, input_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense1(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dense2(hidden_states) return hidden_states class PerceiverLayer(nn.Module): def __init__( self, config, is_cross_attention=False, qk_channels=None, v_channels=None, num_heads=1, q_dim=None, kv_dim=None, widening_factor=4, use_query_residual=True, ): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = PerceiverAttention( config, is_cross_attention=is_cross_attention, qk_channels=qk_channels, v_channels=v_channels, num_heads=num_heads, q_dim=q_dim, kv_dim=kv_dim, use_query_residual=use_query_residual, ) self.layernorm = nn.LayerNorm(q_dim) self.mlp = PerceiverMLP(config, input_size=q_dim, widening_factor=widening_factor) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs: Optional[torch.FloatTensor] = None, inputs_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: attention_outputs = self.attention( hidden_states, attention_mask, head_mask, inputs, inputs_mask, output_attentions, ) attention_output = attention_outputs[0] outputs = attention_outputs[1:] # add attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) layer_output = layer_output + attention_output # residual connection outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): layer_output = self.layernorm(attention_output) layer_output = self.mlp(layer_output) return layer_output class PerceiverEncoder(nn.Module): """The Perceiver Encoder: a scalable, fully attentional encoder.""" def __init__(self, config, kv_dim=None): super().__init__() self.config = config # Check that we can use multihead-attention with these shapes. if config.d_latents % config.num_self_attention_heads != 0: raise ValueError( f"num_z_channels ({config.d_latents}) must be divisible by" f" num_self_attend_heads ({config.num_self_attention_heads})." ) if config.d_latents % config.num_cross_attention_heads != 0: raise ValueError( f"num_z_channels ({config.d_latents}) must be divisible by" f" num_cross_attend_heads ({config.num_cross_attention_heads})." ) # Construct the cross attention layer. self.cross_attention = PerceiverLayer( config, is_cross_attention=True, qk_channels=config.qk_channels, v_channels=config.v_channels, num_heads=config.num_cross_attention_heads, q_dim=config.d_latents, kv_dim=kv_dim, widening_factor=config.cross_attention_widening_factor, use_query_residual=config.use_query_residual, ) # Construct a single block of self-attention layers. 
# We get deeper architectures by applying this block more than once. self_attention_layers = [] for _ in range(config.num_self_attends_per_block): layer = PerceiverLayer( config, is_cross_attention=False, qk_channels=config.qk_channels, v_channels=config.v_channels, num_heads=config.num_self_attention_heads, q_dim=config.d_latents, kv_dim=config.d_latents, widening_factor=config.self_attention_widening_factor, ) self_attention_layers.append(layer) self.self_attends = nn.ModuleList(self_attention_layers) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs: Optional[torch.FloatTensor] = None, inputs_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions else None # Apply the cross-attention between the latents (hidden_states) and inputs: layer_outputs = self.cross_attention( hidden_states, attention_mask=attention_mask, head_mask=None, inputs=inputs, inputs_mask=inputs_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_cross_attentions = all_cross_attentions + (layer_outputs[1],) # Apply the block of self-attention layers more than once: for _ in range(self.config.num_blocks): for i, layer_module in enumerate(self.self_attends): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None ) return BaseModelOutputWithCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class PerceiverPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = PerceiverConfig base_model_prefix = "perceiver" main_input_name = "inputs" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif hasattr(module, "latents"): module.latents.data.normal_(mean=0.0, std=self.config.initializer_range) elif hasattr(module, "position_embeddings") and isinstance(module, PerceiverTrainablePositionEncoding): module.position_embeddings.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.ParameterDict): for modality in module.keys(): module[modality].data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) PERCEIVER_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PerceiverConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ PERCEIVER_MODEL_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PerceiverConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. decoder (*DecoderType*, *optional*): Optional decoder to use to decode the latent representation of the encoder. Examples include *transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder*, *transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder*, *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder*. input_preprocessor (*PreprocessorType*, *optional*): Optional input preprocessor to use. Examples include *transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor*. output_postprocessor (*PostprocessorType*, *optional*): Optional output postprocessor to use. 
Examples include *transformers.models.perceiver.modeling_perceiver.PerceiverImagePostprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor*. Note that you can define your own decoders, preprocessors and/or postprocessors to fit your use-case. """ PERCEIVER_INPUTS_DOCSTRING = r""" Args: inputs (`torch.FloatTensor`): Inputs to the perceiver. Can be anything: images, text, audio, video, etc. attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( """The Perceiver: a scalable, fully attentional architecture.""", PERCEIVER_MODEL_START_DOCSTRING, ) class PerceiverModel(PerceiverPreTrainedModel): def __init__( self, config, decoder=None, input_preprocessor: PreprocessorType = None, output_postprocessor: PostprocessorType = None, ): super().__init__(config) self.config = config self.input_preprocessor = input_preprocessor self.output_postprocessor = output_postprocessor self.embeddings = PerceiverEmbeddings(config) self.encoder = PerceiverEncoder( config, kv_dim=input_preprocessor.num_channels if input_preprocessor is not None else config.d_model ) self.decoder = decoder # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.latents def set_input_embeddings(self, value): self.embeddings.latents = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) @replace_return_docstrings(output_type=PerceiverModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, subsampled_output_points: Optional[Dict[str, torch.Tensor]] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, PerceiverModelOutput]: r""" Returns: Examples: ```python >>> from transformers import PerceiverConfig, PerceiverTokenizer, PerceiverImageProcessor, PerceiverModel >>> from transformers.models.perceiver.modeling_perceiver import ( ... PerceiverTextPreprocessor, ... PerceiverImagePreprocessor, ... PerceiverClassificationDecoder, ... ) >>> import torch >>> import requests >>> from PIL import Image >>> # EXAMPLE 1: using the Perceiver to classify texts >>> # - we define a TextPreprocessor, which can be used to embed tokens >>> # - we define a ClassificationDecoder, which can be used to decode the >>> # final hidden states of the latents to classification logits >>> # using trainable position embeddings >>> config = PerceiverConfig() >>> preprocessor = PerceiverTextPreprocessor(config) >>> decoder = PerceiverClassificationDecoder( ... config, ... num_channels=config.d_latents, ... trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1), ... use_query_residual=True, ... ) >>> model = PerceiverModel(config, input_preprocessor=preprocessor, decoder=decoder) >>> # you can then do a forward pass as follows: >>> tokenizer = PerceiverTokenizer() >>> text = "hello world" >>> inputs = tokenizer(text, return_tensors="pt").input_ids >>> with torch.no_grad(): ... outputs = model(inputs=inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 2] >>> # to train, one can train the model using standard cross-entropy: >>> criterion = torch.nn.CrossEntropyLoss() >>> labels = torch.tensor([1]) >>> loss = criterion(logits, labels) >>> # EXAMPLE 2: using the Perceiver to classify images >>> # - we define an ImagePreprocessor, which can be used to embed images >>> config = PerceiverConfig(image_size=224) >>> preprocessor = PerceiverImagePreprocessor( ... config, ... prep_type="conv1x1", ... spatial_downsample=1, ... out_channels=256, ... position_encoding_type="trainable", ... concat_or_add_pos="concat", ... project_pos_dim=256, ... trainable_position_encoding_kwargs=dict( ... num_channels=256, ... index_dims=config.image_size**2, ... ), ... ) >>> model = PerceiverModel( ... config, ... input_preprocessor=preprocessor, ... decoder=PerceiverClassificationDecoder( ... config, ... num_channels=config.d_latents, ... trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1), ... use_query_residual=True, ... ), ... ) >>> # you can then do a forward pass as follows: >>> image_processor = PerceiverImageProcessor() >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor(image, return_tensors="pt").pixel_values >>> with torch.no_grad(): ... 
outputs = model(inputs=inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 2] >>> # to train, one can train the model using standard cross-entropy: >>> criterion = torch.nn.CrossEntropyLoss() >>> labels = torch.tensor([1]) >>> loss = criterion(logits, labels) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.input_preprocessor is not None: inputs, modality_sizes, inputs_without_pos = self.input_preprocessor(inputs) else: modality_sizes = None inputs_without_pos = None if inputs.size()[-1] != self.config.d_model: raise ValueError( f"Last dimension of the inputs: {inputs.size()[-1]} doesn't correspond to config.d_model:" f" {self.config.d_model}. Make sure to set config.d_model appropriately." ) batch_size, seq_length, _ = inputs.size() device = inputs.device # If no attention mask is provided, make them all ones if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) # Make the attention mask broadcastable to [batch_size, num_heads, seq_length, seq_length] extended_attention_mask = self.invert_attention_mask(attention_mask) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_blocks x num_heads] # and head_mask is converted to shape [num_blocks x batch x num_heads x N x N] head_mask = self.get_head_mask(head_mask, self.config.num_blocks * self.config.num_self_attends_per_block) embedding_output = self.embeddings(batch_size=batch_size) encoder_outputs = self.encoder( embedding_output, attention_mask=None, head_mask=head_mask, inputs=inputs, inputs_mask=extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] logits = None if self.decoder: if subsampled_output_points is not None: output_modality_sizes = { "audio": subsampled_output_points["audio"].shape[0], "image": subsampled_output_points["image"].shape[0], "label": 1, } else: output_modality_sizes = modality_sizes decoder_query = self.decoder.decoder_query( inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_output_points ) decoder_outputs = self.decoder( decoder_query, z=sequence_output, query_mask=extended_attention_mask, output_attentions=output_attentions, ) logits = decoder_outputs.logits # add cross-attentions of decoder if output_attentions and decoder_outputs.cross_attentions is not None: if return_dict: encoder_outputs.cross_attentions = ( encoder_outputs.cross_attentions + decoder_outputs.cross_attentions ) else: encoder_outputs = encoder_outputs + decoder_outputs.cross_attentions if self.output_postprocessor: logits = self.output_postprocessor(logits, modality_sizes=output_modality_sizes) if not return_dict: if logits is not None: return (logits, sequence_output) + encoder_outputs[1:] else: return (sequence_output,) + encoder_outputs[1:] return PerceiverModelOutput( logits=logits, last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings("""Example use of Perceiver for masked language modeling.""", 
PERCEIVER_START_DOCSTRING) class PerceiverForMaskedLM(PerceiverPreTrainedModel): def __init__(self, config: PerceiverConfig): super().__init__(config) text_preprocessor = PerceiverTextPreprocessor(config) trainable_position_encoding_kwargs_decoder = { "num_channels": text_preprocessor.num_channels, "index_dims": config.max_position_embeddings, } self.perceiver = PerceiverModel( config, input_preprocessor=text_preprocessor, decoder=PerceiverBasicDecoder( config, output_num_channels=config.d_latents, output_index_dims=config.max_position_embeddings, # we need to define the seq_len of the inputs beforehand num_channels=text_preprocessor.num_channels, qk_channels=8 * 32, v_channels=text_preprocessor.num_channels, num_heads=8, use_query_residual=False, final_project=False, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, ), ) self.embedding_decoder = PerceiverEmbeddingDecoder(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, input_ids: Optional[torch.Tensor] = None, ) -> Union[Tuple, PerceiverMaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, PerceiverForMaskedLM >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver") >>> model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver") >>> # training >>> text = "This is an incomplete sentence where some words are missing." >>> inputs = tokenizer(text, padding="max_length", return_tensors="pt") >>> # mask " missing." >>> inputs["input_ids"][0, 52:61] = tokenizer.mask_token_id >>> labels = tokenizer(text, padding="max_length", return_tensors="pt").input_ids >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> round(loss.item(), 2) 19.87 >>> logits = outputs.logits >>> list(logits.shape) [1, 2048, 262] >>> # inference >>> text = "This is an incomplete sentence where some words are missing." >>> encoding = tokenizer(text, padding="max_length", return_tensors="pt") >>> # mask bytes corresponding to " missing.". Note that the model performs much better if the masked span starts with a space. >>> encoding["input_ids"][0, 52:61] = tokenizer.mask_token_id >>> # forward pass >>> with torch.no_grad(): ... outputs = model(**encoding) >>> logits = outputs.logits >>> list(logits.shape) [1, 2048, 262] >>> masked_tokens_predictions = logits[0, 52:61].argmax(dim=-1).tolist() >>> tokenizer.decode(masked_tokens_predictions) ' missing.' 
```""" if inputs is not None and input_ids is not None: raise ValueError("You cannot use both `inputs` and `input_ids`") elif inputs is None and input_ids is not None: inputs = input_ids return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.perceiver( inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.embedding_decoder( outputs.logits if return_dict else outputs[0], embedding_layer=self.perceiver.input_preprocessor.embeddings ) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return PerceiverMaskedLMOutput( loss=masked_lm_loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings("""Example use of Perceiver for text classification.""", PERCEIVER_START_DOCSTRING) class PerceiverForSequenceClassification(PerceiverPreTrainedModel): def __init__(self, config): super().__init__(config) trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1} self.num_labels = config.num_labels self.perceiver = PerceiverModel( config, input_preprocessor=PerceiverTextPreprocessor(config), decoder=PerceiverClassificationDecoder( config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True, ), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, input_ids: Optional[torch.Tensor] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
Returns: Examples: ```python >>> from transformers import AutoTokenizer, PerceiverForSequenceClassification >>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver") >>> model = PerceiverForSequenceClassification.from_pretrained("deepmind/language-perceiver") >>> text = "hello world" >>> inputs = tokenizer(text, return_tensors="pt").input_ids >>> outputs = model(inputs=inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 2] ```""" if inputs is not None and input_ids is not None: raise ValueError("You cannot use both `inputs` and `input_ids`") elif inputs is None and input_ids is not None: inputs = input_ids return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.perceiver( inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return PerceiverClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings( """ Example use of Perceiver for image classification, for tasks such as ImageNet. This model uses learned position embeddings. In other words, this model is not given any privileged information about the structure of images. As shown in the paper, this model can achieve a top-1 accuracy of 72.7 on ImageNet. [`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with `prep_type="conv1x1"`) to preprocess the input images, and [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of [`PerceiverModel`] into classification logits. 
""", PERCEIVER_START_DOCSTRING, ) class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel): def __init__(self, config): super().__init__(config) trainable_position_encoding_kwargs_preprocessor = {"num_channels": 256, "index_dims": config.image_size**2} trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1} self.num_labels = config.num_labels self.perceiver = PerceiverModel( config, input_preprocessor=PerceiverImagePreprocessor( config, prep_type="conv1x1", spatial_downsample=1, out_channels=256, position_encoding_type="trainable", concat_or_add_pos="concat", project_pos_dim=256, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_preprocessor, ), decoder=PerceiverClassificationDecoder( config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True, ), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationLearned
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-learned")
        >>> model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned")

        >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
        >>> outputs = model(inputs=inputs)
        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 1000]

        >>> # model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: tabby, tabby cat
        ```"""
        if inputs is not None and pixel_values is not None:
            raise ValueError("You cannot use both `inputs` and `pixel_values`")
        elif inputs is None and pixel_values is not None:
            inputs = pixel_values
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return PerceiverClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@add_start_docstrings(
    """
Example use of Perceiver for image classification, for tasks such as ImageNet.

This model uses fixed 2D Fourier position embeddings. As shown in the paper, this model can achieve a top-1 accuracy of
79.0 on ImageNet, and 84.5 when pre-trained on a large-scale dataset (i.e. JFT).

[`PerceiverForImageClassificationFourier`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
(with `prep_type="pixels"`) to preprocess the input images, and
[`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
[`PerceiverModel`] into classification logits.
""", PERCEIVER_START_DOCSTRING, ) class PerceiverForImageClassificationFourier(PerceiverPreTrainedModel): def __init__(self, config): super().__init__(config) fourier_position_encoding_kwargs_preprocessor = { "concat_pos": True, "max_resolution": (224, 224), "num_bands": 64, "sine_only": False, } trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1} self.num_labels = config.num_labels self.perceiver = PerceiverModel( config, input_preprocessor=PerceiverImagePreprocessor( config, prep_type="pixels", spatial_downsample=1, fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor, ), decoder=PerceiverClassificationDecoder( config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True, ), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationFourier
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-fourier")
        >>> model = PerceiverForImageClassificationFourier.from_pretrained("deepmind/vision-perceiver-fourier")

        >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
        >>> outputs = model(inputs=inputs)
        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 1000]

        >>> # model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: tabby, tabby cat
        ```"""
        if inputs is not None and pixel_values is not None:
            raise ValueError("You cannot use both `inputs` and `pixel_values`")
        elif inputs is None and pixel_values is not None:
            inputs = pixel_values
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return PerceiverClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@add_start_docstrings(
    """
Example use of Perceiver for image classification, for tasks such as ImageNet.

This model uses a 2D conv+maxpool preprocessing network. As shown in the paper, this model can achieve a top-1 accuracy
of 82.1 on ImageNet.

[`PerceiverForImageClassificationConvProcessing`] uses
[`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with `prep_type="conv"`) to preprocess the input
images, and [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation
of [`PerceiverModel`] into classification logits.
""", PERCEIVER_START_DOCSTRING, ) class PerceiverForImageClassificationConvProcessing(PerceiverPreTrainedModel): def __init__(self, config): super().__init__(config) fourier_position_encoding_kwargs_preprocessor = { "concat_pos": True, "max_resolution": (56, 56), "num_bands": 64, "sine_only": False, } trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1} self.num_labels = config.num_labels self.perceiver = PerceiverModel( config, input_preprocessor=PerceiverImagePreprocessor( config, prep_type="conv", spatial_downsample=1, position_encoding_type="fourier", fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor, ), decoder=PerceiverClassificationDecoder( config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True, ), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
Returns: Examples: ```python >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationConvProcessing >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-conv") >>> model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv") >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values >>> outputs = model(inputs=inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 1000] >>> # model predicts one of the 1000 ImageNet classes >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) Predicted class: tabby, tabby cat ```""" if inputs is not None and pixel_values is not None: raise ValueError("You cannot use both `inputs` and `pixel_values`") elif inputs is None and pixel_values is not None: inputs = pixel_values return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.perceiver( inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return PerceiverClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings( """ Example use of Perceiver for optical flow, for tasks such as Sintel and KITTI. [`PerceiverForOpticalFlow`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with *prep_type="patches"*) to preprocess the input images, and [`~models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder`] to decode the latent representation of [`PerceiverModel`]. As input, one concatenates 2 subsequent frames along the channel dimension and extract a 3 x 3 patch around each pixel (leading to 3 x 3 x 3 x 2 = 54 values for each pixel). Fixed Fourier position encodings are used to encode the position of each pixel in the patch. Next, one applies the Perceiver encoder. To decode, one queries the latent representation using the same encoding used for the input. 
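
As an illustration only (the helper below is not part of the library; its name and exact shapes are assumptions), such
patches could be extracted with `torch.nn.functional.unfold`. The preprocessor of [`PerceiverForOpticalFlow`] then
concatenates the two frames along the channel dimension internally (2 * 27 = 54 values per pixel):

```python
import torch


def extract_flow_patches(frame1, frame2, patch_size=3):
    # frame1, frame2: (batch_size, 3, height, width), e.g. 368 x 496 as in the paper
    def patchify(frame):
        batch_size, num_channels, height, width = frame.shape
        # take a patch_size x patch_size neighbourhood around every pixel -> 3 * 3 * 3 = 27 values per pixel
        patches = torch.nn.functional.unfold(frame, kernel_size=patch_size, padding=patch_size // 2)
        return patches.view(batch_size, num_channels * patch_size**2, height, width)

    # stack the two frames along a num_frames dimension: (batch_size, 2, 27, height, width),
    # which matches the input shape used in the example of `forward`
    return torch.stack([patchify(frame1), patchify(frame2)], dim=1)
```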
""", PERCEIVER_START_DOCSTRING, ) class PerceiverForOpticalFlow(PerceiverPreTrainedModel): def __init__(self, config): super().__init__(config) fourier_position_encoding_kwargs_preprocessor = { "num_bands": 64, "max_resolution": config.train_size, "sine_only": False, "concat_pos": True, } fourier_position_encoding_kwargs_decoder = { "concat_pos": True, "max_resolution": config.train_size, "num_bands": 64, "sine_only": False, } image_preprocessor = PerceiverImagePreprocessor( config, prep_type="patches", spatial_downsample=1, conv_after_patching=True, conv_after_patching_in_channels=54, temporal_downsample=2, position_encoding_type="fourier", # position_encoding_kwargs fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor, ) self.perceiver = PerceiverModel( config, input_preprocessor=image_preprocessor, decoder=PerceiverOpticalFlowDecoder( config, num_channels=image_preprocessor.num_channels, output_image_shape=config.train_size, rescale_factor=100.0, # decoder kwargs use_query_residual=False, output_num_channels=2, # We query the decoder using the first frame features # rather than a standard decoder position encoding. position_encoding_type="fourier", fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_decoder, ), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the optical flow loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
        Returns:

        Examples:

        ```python
        >>> from transformers import PerceiverForOpticalFlow
        >>> import torch

        >>> model = PerceiverForOpticalFlow.from_pretrained("deepmind/optical-flow-perceiver")

        >>> # in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel,
        >>> # leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels)
        >>> # patches have shape (batch_size, num_frames, num_channels, height, width)
        >>> # the authors train on resolutions of 368 x 496
        >>> patches = torch.randn(1, 2, 27, 368, 496)
        >>> outputs = model(inputs=patches)
        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 368, 496, 2]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]

        loss = None
        if labels is not None:
            raise NotImplementedError("Optical flow training is not yet supported")

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return PerceiverClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@add_start_docstrings(
    """
Example use of Perceiver for multimodal (video) autoencoding, for tasks such as Kinetics-700.

[`PerceiverForMultimodalAutoencoding`] uses [`~models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor`] to
preprocess the 3 modalities: images, audio and class labels. This preprocessor uses modality-specific preprocessors to
preprocess every modality separately, after which they are concatenated. Trainable position embeddings are used to pad
each modality to the same number of channels to make concatenation along the time dimension possible. Next, one applies
the Perceiver encoder.

[`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] is used to decode the latent representation of
[`PerceiverModel`]. This decoder uses each modality-specific decoder to construct queries. The decoder queries are
created based on the inputs after preprocessing. However, autoencoding an entire video in a single forward pass is
computationally infeasible, hence one only uses parts of the decoder queries to do cross-attention with the latent
representation. This is determined by the subsampled indices for each modality, which can be provided as additional
input to the forward pass of [`PerceiverForMultimodalAutoencoding`].

[`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] also pads the decoder queries of the different
modalities to the same number of channels, in order to concatenate them along the time dimension. Next, cross-attention
is performed with the latent representation of [`PerceiverModel`].

Finally, [`~models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor`] is used to turn this tensor into an
actual video. It first splits up the output into the different modalities, and then applies the respective
postprocessor for each modality.

Note that, by masking the classification label during evaluation (i.e. simply providing a tensor of zeros for the
"label" modality), this auto-encoding model becomes a Kinetics 700 video classifier.
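
As a purely illustrative sketch (the variable names below are assumptions; only the documented output shapes are taken
from the example in `forward`), the classification scores can then be read from the `"label"` modality of the decoded
logits:

```python
# `outputs` is assumed to come from a forward pass in which a zero tensor was provided for the "label" modality
label_logits = outputs.logits["label"]  # shape (batch_size, 700) for Kinetics-700
predicted_class_idx = label_logits.argmax(-1).item()
print("Predicted class:", model.config.id2label[predicted_class_idx])
```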
""", PERCEIVER_START_DOCSTRING, ) class PerceiverForMultimodalAutoencoding(PerceiverPreTrainedModel): def __init__(self, config: PerceiverConfig): super().__init__(config) n_audio_samples = config.num_frames * config.audio_samples_per_frame input_preprocessor = PerceiverMultimodalPreprocessor( min_padding_size=4, modalities={ "audio": PerceiverAudioPreprocessor( config, position_encoding_type="fourier", fourier_position_encoding_kwargs={ "num_bands": 192, "max_resolution": (n_audio_samples,), "sine_only": False, "concat_pos": True, }, prep_type="patches", samples_per_patch=config.samples_per_patch, ), "image": PerceiverImagePreprocessor( config, position_encoding_type="fourier", fourier_position_encoding_kwargs={ "num_bands": 32, "max_resolution": (config.num_frames, config.image_size, config.image_size), "sine_only": False, "concat_pos": True, }, prep_type="patches", spatial_downsample=4, temporal_downsample=1, ), "label": PerceiverOneHotPreprocessor(config), }, mask_probs={"image": 0.0, "audio": 0.0, "label": 1.0}, ) image_decoder = PerceiverBasicVideoAutoencodingDecoder( config, # Autoencoding, don't pass inputs to the queries. concat_preprocessed_input=False, output_shape=config.output_shape, output_num_channels=config.output_num_channels, use_query_residual=False, position_encoding_only=True, position_encoding_type="fourier", fourier_position_encoding_kwargs={ "num_bands": 32, "max_resolution": (config.num_frames, config.image_size, config.image_size), "sine_only": False, "concat_pos": True, }, ) decoder = PerceiverMultimodalDecoder( config, # Autoencoding, don't pass inputs to the queries. concat_preprocessed_input=False, # Modality specific decoders are used ONLY to generate queries. # All modalties are decoded together using a unified decoder. modalities={ "audio": PerceiverBasicDecoder( config, # Autoencoding, don't pass inputs to the queries. concat_preprocessed_input=False, output_index_dims=(n_audio_samples // config.samples_per_patch,), output_num_channels=config.output_num_channels, use_query_residual=False, position_encoding_only=True, position_encoding_type="fourier", fourier_position_encoding_kwargs={ "num_bands": 192, "max_resolution": (n_audio_samples,), "sine_only": False, "concat_pos": True, }, ), "image": image_decoder, "label": PerceiverClassificationDecoder( config, # Autoencoding, don't pass inputs to the queries. 
concat_preprocessed_input=False, use_query_residual=False, position_encoding_only=True, position_encoding_type="trainable", trainable_position_encoding_kwargs={ "num_channels": config._label_trainable_num_channels, "index_dims": 1, }, ), }, num_outputs=None, output_num_channels=config.output_num_channels, use_query_residual=False, ) output_postprocessor = PerceiverMultimodalPostprocessor( modalities={ "audio": PerceiverAudioPostprocessor(config, in_channels=config.output_num_channels), "image": PerceiverProjectionPostprocessor(in_channels=config.output_num_channels, out_channels=3), "label": PerceiverClassificationPostprocessor(config, in_channels=config.output_num_channels), } ) self.perceiver = PerceiverModel( config, input_preprocessor=input_preprocessor, decoder=decoder, output_postprocessor=output_postprocessor, ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, subsampled_output_points: Optional[Dict[str, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> from transformers import PerceiverForMultimodalAutoencoding >>> import torch >>> import numpy as np >>> # create multimodal inputs >>> images = torch.randn((1, 16, 3, 224, 224)) >>> audio = torch.randn((1, 30720, 1)) >>> inputs = dict(image=images, audio=audio, label=torch.zeros((images.shape[0], 700))) >>> model = PerceiverForMultimodalAutoencoding.from_pretrained("deepmind/multimodal-perceiver") >>> # in the Perceiver IO paper, videos are auto-encoded in chunks >>> # each chunk subsamples different index dimensions of the image and audio modality decoder queries >>> nchunks = 128 >>> image_chunk_size = np.prod((16, 224, 224)) // nchunks >>> audio_chunk_size = audio.shape[1] // model.config.samples_per_patch // nchunks >>> # process the first chunk >>> chunk_idx = 0 >>> subsampling = { ... "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)), ... "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)), ... "label": None, ... 
} >>> outputs = model(inputs=inputs, subsampled_output_points=subsampling) >>> logits = outputs.logits >>> list(logits["audio"].shape) [1, 240] >>> list(logits["image"].shape) [1, 6272, 3] >>> list(logits["label"].shape) [1, 700] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.perceiver( inputs=inputs, attention_mask=attention_mask, subsampled_output_points=subsampled_output_points, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: raise NotImplementedError("Multimodal autoencoding training is not yet supported") if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return PerceiverClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) # Below: position encodings def build_position_encoding( position_encoding_type, out_channels=None, project_pos_dim=-1, trainable_position_encoding_kwargs=None, fourier_position_encoding_kwargs=None, ): """ Builds the position encoding. Args: - out_channels: refers to the number of channels of the position encodings. - project_pos_dim: if specified, will project the position encodings to this dimension. """ if position_encoding_type == "trainable": if not trainable_position_encoding_kwargs: raise ValueError("Make sure to pass trainable_position_encoding_kwargs") output_pos_enc = PerceiverTrainablePositionEncoding(**trainable_position_encoding_kwargs) elif position_encoding_type == "fourier": # We don't use the index_dims argument, as this is only known during the forward pass if not fourier_position_encoding_kwargs: raise ValueError("Make sure to pass fourier_position_encoding_kwargs") output_pos_enc = PerceiverFourierPositionEncoding(**fourier_position_encoding_kwargs) else: raise ValueError(f"Unknown position encoding type: {position_encoding_type}.") # Optionally, project the position encoding to a target dimension: positions_projection = nn.Linear(out_channels, project_pos_dim) if project_pos_dim > 0 else nn.Identity() return output_pos_enc, positions_projection # Below: Perceiver decoders class PerceiverAbstractDecoder(nn.Module, metaclass=abc.ABCMeta): """Perceiver abstract decoder.""" @abc.abstractmethod def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): raise NotImplementedError @property @abc.abstractmethod def num_query_channels(self): raise NotImplementedError @abc.abstractmethod def forward(self, query, z, query_mask=None): raise NotImplementedError class PerceiverProjectionDecoder(PerceiverAbstractDecoder): """ Baseline projection decoder (no cross-attention). Args: config ([`PerceiverConfig`]): Model configuration. 
""" def __init__(self, config): super().__init__() self.classifier = nn.Linear(config.d_latents, config.num_labels) def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): return None def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None ) -> torch.FloatTensor: # (batch_size, num_latents, d_latents) -> (batch_size, d_latents) z = torch.mean(z, dim=1) # (batch_size, d_latents) -> (batch_size, config.num_labels) logits = self.classifier(z) return logits class PerceiverBasicDecoder(PerceiverAbstractDecoder): """ Cross-attention-based decoder. This class can be used to decode the final hidden states of the latents using a cross-attention operation, in which the latents produce keys and values. The shape of the output of this class depends on how one defines the output queries (also called decoder queries). Args: config ([*PerceiverConfig*]): Model configuration. output_num_channels (`int`, *optional*): The number of channels in the output. Will only be used in case *final_project* is set to `True`. position_encoding_type (`str`, *optional*, defaults to "trainable"): The type of position encoding to use. Can be either "trainable", "fourier", or "none". output_index_dims (`int`, *optional*): The number of dimensions of the output queries. Ignored if 'position_encoding_type' == 'none'. num_channels (`int`, *optional*, defaults to 128): The number of channels of the decoder queries. Ignored if 'position_encoding_type' == 'none'. qk_channels (`int`, *optional*): The number of channels of the queries and keys in the cross-attention layer. v_channels (`int`, *optional*): The number of channels of the values in the cross-attention layer. num_heads (`int`, *optional*, defaults to 1): The number of attention heads in the cross-attention layer. widening_factor (`int`, *optional*, defaults to 1): The widening factor of the cross-attention layer. use_query_residual (`bool`, *optional*, defaults to `False`): Whether to use a residual connection between the query and the output of the cross-attention layer. concat_preprocessed_input (`bool`, *optional*, defaults to `False`): Whether to concatenate the preprocessed input to the query. final_project (`bool`, *optional*, defaults to `True`): Whether to project the output of the cross-attention layer to a target dimension. position_encoding_only (`bool`, *optional*, defaults to `False`): Whether to only use this class to define output queries. """ def __init__( self, config: PerceiverConfig, output_num_channels: int, position_encoding_type: Optional[str] = "trainable", # The following 2 arguments are ignored if position_encoding_type == 'none': output_index_dims: Optional[int] = None, num_channels: Optional[int] = 128, subsampled_index_dims: Optional[int] = None, qk_channels: Optional[int] = None, v_channels: Optional[int] = None, num_heads: Optional[int] = 1, widening_factor: Optional[int] = 1, use_query_residual: Optional[bool] = False, concat_preprocessed_input: Optional[bool] = False, final_project: Optional[bool] = True, position_encoding_only: Optional[bool] = False, **position_encoding_kwargs, ) -> None: super().__init__() self.output_num_channels = output_num_channels # If `none`, the decoder will not construct any position encodings. # You should construct your own when querying the decoder. 
self.output_position_encodings = None self.position_encoding_type = position_encoding_type self.position_encoding_kwargs = position_encoding_kwargs if position_encoding_type != "none": self.output_position_encodings, self.positions_projection = build_position_encoding( position_encoding_type=position_encoding_type, **position_encoding_kwargs ) self.output_index_dims = output_index_dims self.num_channels = num_channels if subsampled_index_dims is None: subsampled_index_dims = output_index_dims self.subsampled_index_dims = subsampled_index_dims self.concat_preprocessed_input = concat_preprocessed_input self.final_project = final_project self.position_encoding_only = position_encoding_only # for multimodal autoencoding, we don't need the decoder cross-attention and final layer # so then we will set position_encoding_only to True if not self.position_encoding_only: self.decoding_cross_attention = PerceiverLayer( config, is_cross_attention=True, qk_channels=qk_channels, v_channels=v_channels, num_heads=num_heads, q_dim=num_channels, kv_dim=config.d_latents, widening_factor=widening_factor, use_query_residual=use_query_residual, ) self.final_layer = nn.Linear(num_channels, output_num_channels) if final_project else nn.Identity() @property def num_query_channels(self) -> int: if self.position_encoding_type == "none": # Queries come from elsewhere raise ValueError( "You cannot calculate number of decoder query channels when position_encoding_type is set to none" ) if self.position_encoding_only: if "project_pos_dim" in self.position_encoding_kwargs: return self.position_encoding_kwargs["project_pos_dim"] return self.output_position_encodings.output_size() if self.final_project: return self.output_num_channels return self.num_channels def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): if self.position_encoding_type == "none": # Queries come from elsewhere raise ValueError("You cannot construct decoder queries when position_encoding_type is set to none") if subsampled_points is not None: # subsampled_points are the indices if the inputs would be flattened # however, the inputs aren't flattened, that's why we use unravel_index # to get the indices for the unflattened array # unravel_index returns a tuple (x_idx, y_idx, ...) # stack to get the [n, d] tensor of coordinates indices = [torch.from_numpy(x) for x in np.unravel_index(subsampled_points.cpu(), self.output_index_dims)] pos = torch.stack(indices, dim=1) batch_size = inputs.shape[0] # Map these coordinates to [-1, 1] pos = -1 + 2 * pos / torch.tensor(self.output_index_dims)[None, :] pos = torch.broadcast_to(pos[None], [batch_size, pos.shape[0], pos.shape[1]]) # Construct the position encoding. if self.position_encoding_type == "trainable": pos_emb = self.output_position_encodings(batch_size) elif self.position_encoding_type == "fourier": pos_emb = self.output_position_encodings( self.output_index_dims, batch_size=batch_size, device=inputs.device, dtype=inputs.dtype, pos=pos ) # Optionally project them to a target dimension. pos_emb = self.positions_projection(pos_emb) pos_emb = torch.reshape(pos_emb, [pos_emb.shape[0], -1, pos_emb.shape[-1]]) else: batch_size = inputs.shape[0] index_dims = inputs.shape[2:] # Construct the position encoding. 
if self.position_encoding_type == "trainable": pos_emb = self.output_position_encodings(batch_size) elif self.position_encoding_type == "fourier": pos_emb = self.output_position_encodings( index_dims, batch_size, device=inputs.device, dtype=inputs.dtype ) # Optionally project them to a target dimension. pos_emb = self.positions_projection(pos_emb) if self.concat_preprocessed_input: if inputs_without_pos is None: raise ValueError("Value is required for inputs_without_pos if concat_preprocessed_input is True") pos_emb = torch.cat([inputs_without_pos, pos_emb], dim=-1) return pos_emb def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> PerceiverDecoderOutput: # Cross-attention decoding. # key, value: B x N x K; query: B x M x K # Attention maps -> B x N x M # Output -> B x M x K cross_attentions = () if output_attentions else None layer_outputs = self.decoding_cross_attention( query, attention_mask=query_mask, head_mask=None, inputs=z, inputs_mask=None, output_attentions=output_attentions, ) output = layer_outputs[0] if output_attentions: cross_attentions = cross_attentions + (layer_outputs[1],) logits = self.final_layer(output) return PerceiverDecoderOutput(logits=logits, cross_attentions=cross_attentions) class PerceiverClassificationDecoder(PerceiverAbstractDecoder): """ Cross-attention based classification decoder. Light-weight wrapper of [`PerceiverBasicDecoder`] for logit output. Will turn the output of the Perceiver encoder which is of shape (batch_size, num_latents, d_latents) to a tensor of shape (batch_size, num_labels). The queries are of shape (batch_size, 1, num_labels). Args: config ([`PerceiverConfig`]): Model configuration. """ def __init__(self, config, **decoder_kwargs): super().__init__() self.num_labels = config.num_labels self.decoder = PerceiverBasicDecoder( config, output_num_channels=self.num_labels, output_index_dims=1, # Predict a single logit array. 
**decoder_kwargs, ) @property def num_query_channels(self) -> int: return self.decoder.num_query_channels def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): return self.decoder.decoder_query( inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_points ) def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> PerceiverDecoderOutput: decoder_outputs = self.decoder(query, z, output_attentions=output_attentions) # B x 1 x num_classes -> B x num_classes logits = decoder_outputs.logits[:, 0, :] return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions) class PerceiverOpticalFlowDecoder(PerceiverAbstractDecoder): """Cross-attention based optical flow decoder.""" def __init__(self, config, output_image_shape, output_num_channels=2, rescale_factor=100.0, **decoder_kwargs): super().__init__() self.output_image_shape = output_image_shape self.output_num_channels = output_num_channels self.rescale_factor = rescale_factor self.decoder = PerceiverBasicDecoder(config, output_num_channels=output_num_channels, **decoder_kwargs) @property def num_query_channels(self) -> int: return self.decoder.num_query_channels def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): if subsampled_points is not None: raise ValueError("FlowDecoder doesn't support subsampling yet.") return inputs def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> PerceiverDecoderOutput: decoder_outputs = self.decoder(query, z, output_attentions=output_attentions) preds = decoder_outputs.logits # Output flow and rescale. preds /= self.rescale_factor preds = preds.reshape([preds.shape[0]] + list(self.output_image_shape) + [preds.shape[-1]]) return PerceiverDecoderOutput(logits=preds, cross_attentions=decoder_outputs.cross_attentions) class PerceiverBasicVideoAutoencodingDecoder(PerceiverAbstractDecoder): """ Cross-attention based video-autoencoding decoder. Light-weight wrapper of [*PerceiverBasicDecoder*] with video reshaping logic. Args: config ([*PerceiverConfig*]): Model configuration. output_shape (`List[int]`): Shape of the output as (batch_size, num_frames, height, width), excluding the channel dimension. position_encoding_type (`str`): The type of position encoding to use. Can be either "trainable", "fourier", or "none". 
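
    For illustration (shapes assumed, not taken from a specific checkpoint): with `output_shape=[1, 16, 224, 224]`,
    the flat decoder output of shape `(1, 16 * 224 * 224, num_channels)` is reshaped in `forward` to
    `(1, 16, 224, 224, num_channels)`.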
""" def __init__( self, config: PerceiverConfig, output_shape: List[int], position_encoding_type: str, **decoder_kwargs ) -> None: super().__init__() if len(output_shape) != 4: # B, T, H, W raise ValueError(f"Expected rank 4 output_shape, got {output_shape}.") # Build the decoder components: self.output_shape = output_shape self.output_num_channels = decoder_kwargs["output_num_channels"] self.decoder = PerceiverBasicDecoder( config, output_index_dims=self.output_shape[1:4], # T*H*W position_encoding_type=position_encoding_type, **decoder_kwargs, ) @property def num_query_channels(self) -> int: return self.decoder.num_query_channels def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): return self.decoder.decoder_query( inputs, modality_sizes=modality_sizes, inputs_without_pos=inputs_without_pos, subsampled_points=subsampled_points, ) def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None ) -> PerceiverDecoderOutput: decoder_outputs = self.decoder(query, z) logits = decoder_outputs.logits logits = torch.reshape(logits, self.output_shape + [logits.shape[-1]]) return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions) def restructure(modality_sizes: ModalitySizeType, inputs: torch.Tensor) -> Mapping[str, torch.Tensor]: """ Partitions a [B, N, C] tensor into tensors for each modality. Args: modality_sizes dict specifying the size of the modality inputs: input tensor Returns: dict mapping name of modality to its associated tensor. """ outputs = {} index = 0 # Apply a predictable ordering to the modalities for modality in sorted(modality_sizes.keys()): size = modality_sizes[modality] inp = inputs[:, index : index + size] index += size outputs[modality] = inp return outputs class PerceiverMultimodalDecoder(PerceiverAbstractDecoder): """ Multimodal decoding by composing uni-modal decoders. The *modalities* argument of the constructor is a dictionary mapping modality name to the decoder of that modality. That decoder will be used to construct queries for that modality. Modality-specific queries are padded with trainable modality-specific parameters, after which they are concatenated along the time dimension. Next, there is a shared cross attention operation across all modalities. Args: config ([*PerceiverConfig*]): Model configuration. modalities (`Dict[str, PerceiverAbstractDecoder]`): Dictionary mapping modality name to the decoder of that modality. num_outputs (`int`): The number of outputs of the decoder. output_num_channels (`int`): The number of channels in the output. min_padding_size (`int`, *optional*, defaults to 2): The minimum padding size for all modalities. The final output will have num_channels equal to the maximum channels across all modalities plus min_padding_size. subsampled_index_dims (`Dict[str, PerceiverAbstractDecoder]`, *optional*): Dictionary mapping modality name to the subsampled index dimensions to use for the decoder query of that modality. 
""" def __init__( self, config: PerceiverConfig, modalities: Dict[str, PerceiverAbstractDecoder], num_outputs: int, output_num_channels: int, min_padding_size: Optional[int] = 2, subsampled_index_dims: Optional[Dict[str, PerceiverAbstractDecoder]] = None, **decoder_kwargs, ) -> None: super().__init__() self.modalities = nn.ModuleDict(modalities) self.subsampled_index_dims = subsampled_index_dims self.min_padding_size = min_padding_size self.output_num_channels = output_num_channels self.num_outputs = num_outputs self.decoder = PerceiverBasicDecoder( config, output_index_dims=(num_outputs,), output_num_channels=output_num_channels, position_encoding_type="none", num_channels=self.num_query_channels, **decoder_kwargs, ) self.padding = nn.ParameterDict( { modality: nn.Parameter(torch.randn(1, self.num_query_channels - decoder.num_query_channels)) for modality, decoder in modalities.items() } ) @property def num_query_channels(self) -> int: max_channel_size = max(decoder.num_query_channels for _, decoder in self.modalities.items()) common_channel_size = max_channel_size + self.min_padding_size return common_channel_size def decoder_query(self, inputs, modality_sizes, inputs_without_pos=None, subsampled_points=None): # Partition the flat inputs among the different modalities inputs = restructure(modality_sizes, inputs) # Obtain modality-specific decoders' queries subsampled_points = subsampled_points or {} decoder_queries = {} for modality, decoder in self.modalities.items(): # Get input_without_pos for this modality if it exists. input_without_pos = None if inputs_without_pos is not None: input_without_pos = inputs_without_pos.get(modality, None) query = decoder.decoder_query( inputs=inputs[modality], modality_sizes=None, inputs_without_pos=input_without_pos, subsampled_points=subsampled_points.get(modality, None), ) decoder_queries[modality] = query # Pad all queries with trainable position encodings to make them have the same channels def embed(modality, x): x = torch.reshape(x, [x.shape[0], np.prod(x.shape[1:-1]), x.shape[-1]]) pos = self.padding[modality] pos = torch.broadcast_to(pos, [x.shape[0], x.shape[1], self.num_query_channels - x.shape[2]]) return torch.cat([x, pos], dim=2) # Apply a predictable ordering to the modalities return torch.cat( [embed(modality, decoder_queries[modality]) for modality in sorted(self.modalities.keys())], dim=1 ) def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> torch.Tensor: # B x 1 x num_classes -> B x num_classes decoder_outputs = self.decoder(query, z, output_attentions=output_attentions) return decoder_outputs # Below: IO pre- and post-processor classes for Perceiver. def space_to_depth(frames: torch.Tensor, temporal_block_size: int = 1, spatial_block_size: int = 1) -> torch.Tensor: """ Space to depth transform. Rearranges blocks of spatial data, into depth. This function assumes the channels to be first, but will place the channels last after transformation. Based on https://discuss.pytorch.org/t/is-there-any-layer-like-tensorflows-space-to-depth-function/3487/15. 
""" if len(frames.shape) == 4: batch_size, num_channels, height, width = frames.shape # split up dimensions (height by spatial_block_size, width by spatial_block_size) frames = frames.view( batch_size, num_channels, height // spatial_block_size, spatial_block_size, width // spatial_block_size, spatial_block_size, ) # move blocks to last dimension: (batch_size, H//bs, W//bs, bs, bs, C) frames = frames.permute(0, 2, 4, 3, 5, 1).contiguous() # concatenate blocks along channel dimension: (batch_size, H//bs, W//bs, bs*bs*C) frames = frames.view( batch_size, height // spatial_block_size, width // spatial_block_size, (spatial_block_size**2) * num_channels, ) return frames elif len(frames.shape) == 5: batch_size, time, num_channels, height, width = frames.shape # split up dimensions (time by temporal_block_size, height by spatial_block_size, width by spatial_block_size) frames = frames.view( batch_size, time // temporal_block_size, temporal_block_size, num_channels, height // spatial_block_size, spatial_block_size, width // spatial_block_size, spatial_block_size, ) # move blocks to last dimension: (batch_size, T//ts, H//bs, W//bs, ts, bs, bs, C) frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous() # concatenate blocks along channel dimension: (batch_size, T//ts, H//bs, W//bs, ts*bs*bs*C) frames = frames.view( batch_size, time // temporal_block_size, height // spatial_block_size, width // spatial_block_size, temporal_block_size * (spatial_block_size**2) * num_channels, ) return frames else: raise ValueError( "Frames should be of rank 4 (batch, channels, height, width)" " or rank 5 (batch, time, channels, height, width)" ) class Conv2dSamePadding(nn.Conv2d): """ Conv2d layer with padding="same" support. Source: https://gist.github.com/sumanmichael/4de9dee93f972d47c80c4ade8e149ea6 """ def __init__(self, *args, **kwargs): super(Conv2dSamePadding, self).__init__(*args, **kwargs) self.zero_pad_2d = nn.ZeroPad2d( reduce(__add__, [(k // 2 + (k - 2 * (k // 2)) - 1, k // 2) for k in self.kernel_size[::-1]]) ) def forward(self, input): return self._conv_forward(self.zero_pad_2d(input), self.weight, self.bias) class Conv2DDownsample(nn.Module): """Downsamples 4x by applying a 2D convolution and doing max pooling.""" def __init__( self, num_layers: int = 1, in_channels: int = 3, out_channels: int = 64, use_batchnorm: bool = True, ): """ Constructs a Conv2DDownsample model. Args: in_channels (`int`, *optional*, defaults to 3): The number of input channels. out_channels (`int`, *optional*, defaults to 64): The number of conv output channels. use_batchnorm (`bool`, *optional*, defaults to `True`): Whether to use batchnorm. """ super().__init__() self.conv = Conv2dSamePadding( in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, bias=False ) self.batchnorm = nn.BatchNorm2d(num_features=out_channels) if use_batchnorm else nn.Identity() self.relu = nn.ReLU() self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2) def forward(self, inputs: torch.Tensor) -> torch.Tensor: out = self.conv(inputs) out = self.batchnorm(out) out = self.relu(out) out = self.max_pool(out) return out def generate_fourier_features(pos, num_bands, max_resolution=(224, 224), concat_pos=True, sine_only=False): """ Generate a Fourier frequency position encoding with linear spacing. Args: pos (`torch.LongTensor` of shape `(batch_size, sequence_length, dim)`): The Tensor containing the position of n points in d dimensional space. num_bands (`int`): The number of frequency bands (K) to use. 
max_resolution (`Tuple[int]`, *optional*, defaults to (224, 224)): The maximum resolution (i.e. the number of pixels per dim). A tuple representing resolution for each dimension. concat_pos (`bool`, *optional*, defaults to `True`): Whether to concatenate the input position encoding to the Fourier features. sine_only (`bool`, *optional*, defaults to `False`): Whether to use a single phase (sin) or two (sin/cos) for each frequency band. Returns: `torch.FloatTensor` of shape `(batch_size, sequence_length, n_channels)`: The Fourier position embeddings. If `concat_pos` is `True` and `sine_only` is `False`, output dimensions are ordered as: [dim_1, dim_2, ..., dim_d, sin(pi*f_1*dim_1), ..., sin(pi*f_K*dim_1), ..., sin(pi*f_1*dim_d), ..., sin(pi*f_K*dim_d), cos(pi*f_1*dim_1), ..., cos(pi*f_K*dim_1), ..., cos(pi*f_1*dim_d), ..., cos(pi*f_K*dim_d)], where dim_i is pos[:, i] and f_k is the kth frequency band. """ batch_size = pos.shape[0] min_freq = 1.0 # Nyquist frequency at the target resolution: freq_bands = torch.stack( [torch.linspace(start=min_freq, end=res / 2, steps=num_bands) for res in max_resolution], dim=0 ) # Get frequency bands for each spatial dimension. # Output is size [n, d * num_bands] per_pos_features = pos[0, :, :][:, :, None] * freq_bands[None, :, :] per_pos_features = torch.reshape(per_pos_features, [-1, np.prod(per_pos_features.shape[1:])]) if sine_only: # Output is size [n, d * num_bands] per_pos_features = torch.sin(np.pi * (per_pos_features)) else: # Output is size [n, 2 * d * num_bands] per_pos_features = torch.cat( [torch.sin(np.pi * per_pos_features), torch.cos(np.pi * per_pos_features)], dim=-1 ) # Concatenate the raw input positions. if concat_pos: # Adds d bands to the encoding. per_pos_features = torch.cat([pos, per_pos_features.expand(batch_size, -1, -1)], dim=-1) return per_pos_features def build_linear_positions(index_dims, output_range=(-1.0, 1.0)): """ Generate an array of position indices for an N-D input array. Args: index_dims (`List[int]`): The shape of the index dimensions of the input array. output_range (`Tuple[float]`, *optional*, defaults to `(-1.0, 1.0)`): The min and max values taken by each input index dimension. Returns: `torch.FloatTensor` of shape `(index_dims[0], index_dims[1], .., index_dims[-1], N)`. 
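
    For example (illustrative only):

    ```python
    pos = build_linear_positions((2, 3))
    # pos.shape == (2, 3, 2); pos[0, 0] == tensor([-1., -1.]) and pos[1, 2] == tensor([1., 1.])
    ```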
""" def _linspace(n_xels_per_dim): return torch.linspace(start=output_range[0], end=output_range[1], steps=n_xels_per_dim, dtype=torch.float32) dim_ranges = [_linspace(n_xels_per_dim) for n_xels_per_dim in index_dims] array_index_grid = meshgrid(*dim_ranges, indexing="ij") return torch.stack(array_index_grid, dim=-1) class PerceiverAbstractPositionEncoding(nn.Module, metaclass=abc.ABCMeta): """Perceiver abstract position encoding.""" @property @abc.abstractmethod def num_dimensions(self) -> int: raise NotImplementedError @abc.abstractmethod def output_size(self, *args, **kwargs) -> int: raise NotImplementedError @abc.abstractmethod def forward(self, batch_size, pos): raise NotImplementedError class PerceiverTrainablePositionEncoding(PerceiverAbstractPositionEncoding): """Trainable position encoding.""" def __init__(self, index_dims, num_channels=128): super().__init__() self._num_channels = num_channels self._index_dims = index_dims index_dim = np.prod(index_dims) self.position_embeddings = nn.Parameter(torch.randn(index_dim, num_channels)) @property def num_dimensions(self) -> int: if isinstance(self._index_dims, int): return 1 return len(self._index_dims) def output_size(self, *args, **kwargs) -> int: return self._num_channels def forward(self, batch_size: int) -> torch.Tensor: position_embeddings = self.position_embeddings if batch_size is not None: position_embeddings = position_embeddings.expand(batch_size, -1, -1) return position_embeddings def _check_or_build_spatial_positions(pos, index_dims, batch_size): """ Checks or builds spatial position features (x, y, ...). Args: pos (`torch.FloatTensor`): None, or an array of position features. If None, position features are built. Otherwise, their size is checked. index_dims (`List[int]`): An iterable giving the spatial/index size of the data to be featurized. batch_size (`int`): The batch size of the data to be featurized. Returns: `torch.FloatTensor` of shape `(batch_size, prod(index_dims))` an array of position features. """ if pos is None: pos = build_linear_positions(index_dims) # equivalent to `torch.broadcast_to(pos[None], (batch_size,) + pos.shape)` # but `torch.broadcast_to` cannot be converted to ONNX pos = pos[None].expand((batch_size,) + pos.shape) pos = torch.reshape(pos, [batch_size, np.prod(index_dims), -1]) else: # Just a warning label: you probably don't want your spatial features to # have a different spatial layout than your pos coordinate system. # But feel free to override if you think it'll work! 
if pos.shape[-1] != len(index_dims): raise ValueError("Spatial features have the wrong number of dimensions.") return pos class PerceiverFourierPositionEncoding(PerceiverAbstractPositionEncoding): """Fourier (Sinusoidal) position encoding.""" def __init__(self, num_bands, max_resolution, concat_pos=True, sine_only=False): super().__init__() self.num_bands = num_bands self.max_resolution = max_resolution self.concat_pos = concat_pos self.sine_only = sine_only @property def num_dimensions(self) -> int: return len(self.max_resolution) def output_size(self): """Returns size of positional encodings last dimension.""" num_dims = len(self.max_resolution) encoding_size = self.num_bands * num_dims if not self.sine_only: encoding_size *= 2 if self.concat_pos: encoding_size += self.num_dimensions return encoding_size def forward( self, index_dims: List[int], batch_size: int, device: torch.device, dtype: torch.dtype, pos: torch.FloatTensor = None, ) -> torch.FloatTensor: pos = _check_or_build_spatial_positions(pos, index_dims, batch_size) fourier_pos_enc = generate_fourier_features( pos, num_bands=self.num_bands, max_resolution=self.max_resolution, concat_pos=self.concat_pos, sine_only=self.sine_only, ).to(device=device, dtype=dtype) return fourier_pos_enc class AbstractPreprocessor(nn.Module): @property def num_channels(self) -> int: """Returns size of preprocessor output.""" raise NotImplementedError() class PerceiverTextPreprocessor(AbstractPreprocessor): """ Text preprocessing for Perceiver Encoder. Can be used to embed `inputs` and add positional encodings. The dimensionality of the embeddings is determined by the `d_model` attribute of the configuration. Args: config ([`PerceiverConfig`]): Model configuration. """ def __init__(self, config: PerceiverConfig) -> None: super().__init__() self.config = config self.embeddings = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.d_model) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model) @property def num_channels(self) -> int: return self.config.d_model def forward(self, inputs: torch.LongTensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True): embeddings_without_pos = self.embeddings(inputs) seq_length = inputs.shape[1] position_ids = torch.arange(0, seq_length, device=inputs.device) embeddings = embeddings_without_pos + self.position_embeddings(position_ids) return embeddings, None, embeddings_without_pos class PerceiverEmbeddingDecoder(nn.Module): """ Module to decode embeddings (for masked language modeling). Args: config ([`PerceiverConfig`]): Model configuration. """ def __init__(self, config: PerceiverConfig) -> None: super().__init__() self.config = config self.vocab_size = config.vocab_size self.bias = nn.Parameter(torch.zeros(self.vocab_size)) def forward(self, hidden_states: torch.Tensor, embedding_layer: torch.Tensor) -> torch.Tensor: batch_size, seq_len, d_model = hidden_states.shape # Flatten batch dim output = torch.matmul(hidden_states.reshape([-1, d_model]), embedding_layer.weight.transpose(0, 1)) output = output + self.bias return output.reshape([batch_size, seq_len, self.vocab_size]) class PerceiverMultimodalPostprocessor(nn.Module): """ Multimodal postprocessing for Perceiver. Can be used to combine modality-specific postprocessors into a single postprocessor. Args: modalities (`Mapping[str, PostprocessorType]`): Dictionary mapping modality name to postprocessor class for that modality. 
input_is_dict (`bool`, *optional*, defaults to `False`): If True, input is assumed to be dictionary structured, and outputs keep the same dictionary shape. If False, input is a tensor which is sliced up during postprocessing by *modality_sizes*. """ def __init__(self, modalities: Mapping[str, PostprocessorType], input_is_dict: bool = False): super().__init__() self.modalities = nn.ModuleDict(modalities) self.input_is_dict = input_is_dict def forward( self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None ) -> Mapping[str, torch.Tensor]: if not self.input_is_dict: # Slice up modalities by their sizes. if modality_sizes is None: raise ValueError("Modality sizes should be specified if input is not a dictionary.") inputs = restructure(modality_sizes=modality_sizes, inputs=inputs) outputs = { modality: postprocessor(inputs[modality], pos=pos, modality_sizes=None) for modality, postprocessor in self.modalities.items() } return outputs class PerceiverClassificationPostprocessor(nn.Module): """ Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits. Args: config ([*PerceiverConfig*]): Model configuration. in_channels (`int`): Number of channels in the input. """ def __init__(self, config: PerceiverConfig, in_channels: int) -> None: super().__init__() self.classifier = nn.Linear(in_channels, config.num_labels) def forward(self, inputs, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor: logits = self.classifier(inputs) return logits[:, 0, :] class PerceiverAudioPostprocessor(nn.Module): """ Audio postprocessing for Perceiver. Can be used to convert the decoder output to audio features. Args: config ([*PerceiverConfig*]): Model configuration. in_channels (`int`): Number of channels in the input. postproc_type (`str`, *optional*, defaults to `"patches"`): Postprocessor type to use. Currently, only "patches" is supported. """ def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str = "patches") -> None: super().__init__() if postproc_type not in ("patches",): # to be supported: 'conv', 'patches', 'pixels' raise ValueError("Invalid postproc_type!") # Architecture parameters: self.classifier = nn.Linear(in_channels, config.samples_per_patch) def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor: logits = self.classifier(inputs) return torch.reshape(logits, [inputs.shape[0], -1]) class PerceiverProjectionPostprocessor(nn.Module): """ Projection postprocessing for Perceiver. Can be used to project the channels of the decoder output to a lower dimension. Args: in_channels (`int`): Number of channels in the input. out_channels (`int`): Number of channels in the output. """ def __init__(self, in_channels: int, out_channels: int) -> None: super().__init__() self.classifier = nn.Linear(in_channels, out_channels) def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor: logits = self.classifier(inputs) return logits class PerceiverImagePreprocessor(AbstractPreprocessor): """ Image preprocessing for Perceiver Encoder. Note: the *out_channels* argument refers to the output channels of a convolutional layer, if *prep_type* is set to "conv1x1" or "conv". If one adds absolute position embeddings, one must make sure the *num_channels* of the position encoding kwargs are set equal to the *out_channels*. Args: config ([*PerceiverConfig*]): Model configuration. 
prep_type (`str`, *optional*, defaults to `"conv"`): Preprocessing type. Can be "conv1x1", "conv", "patches", "pixels". spatial_downsample (`int`, *optional*, defaults to 4): Spatial downsampling factor. temporal_downsample (`int`, *optional*, defaults to 1): Temporal downsampling factor (only relevant in case a time dimension is present). position_encoding_type (`str`, *optional*, defaults to `"fourier"`): Position encoding type. Can be "fourier" or "trainable". in_channels (`int`, *optional*, defaults to 3): Number of channels in the input. out_channels (`int`, *optional*, defaults to 64): Number of channels in the output. conv_after_patching (`bool`, *optional*, defaults to `False`): Whether to apply a convolutional layer after patching. conv_after_patching_in_channels (`int`, *optional*, defaults to 54): Number of channels in the input of the convolutional layer after patching. conv2d_use_batchnorm (`bool`, *optional*, defaults to `True`): Whether to use batch normalization in the convolutional layer. concat_or_add_pos (`str`, *optional*, defaults to `"concat"`): How to concatenate the position encoding to the input. Can be "concat" or "add". project_pos_dim (`int`, *optional*, defaults to -1): Dimension of the position encoding to project to. If -1, no projection is applied. **position_encoding_kwargs (`Dict`, *optional*): Keyword arguments for the position encoding. """ def __init__( self, config, prep_type="conv", spatial_downsample: int = 4, temporal_downsample: int = 1, position_encoding_type: str = "fourier", in_channels: int = 3, out_channels: int = 64, conv_after_patching: bool = False, conv_after_patching_in_channels: int = 54, # only relevant when conv_after_patching = True conv2d_use_batchnorm: bool = True, concat_or_add_pos: str = "concat", project_pos_dim: int = -1, **position_encoding_kwargs, ): super().__init__() self.config = config if prep_type not in ("conv", "patches", "pixels", "conv1x1"): raise ValueError(f"Prep_type {prep_type} is invalid") if concat_or_add_pos not in ["concat", "add"]: raise ValueError(f"Invalid value {concat_or_add_pos} for concat_or_add_pos.") self.in_channels = in_channels self.prep_type = prep_type self.spatial_downsample = spatial_downsample self.temporal_downsample = temporal_downsample self.position_encoding_type = position_encoding_type self.concat_or_add_pos = concat_or_add_pos self.conv_after_patching = conv_after_patching self.out_channels = out_channels if self.prep_type == "conv": # Downsampling with conv is currently restricted convnet_num_layers = math.log(spatial_downsample, 4) convnet_num_layers_is_int = convnet_num_layers == np.round(convnet_num_layers) if not convnet_num_layers_is_int or temporal_downsample != 1: raise ValueError( "Only powers of 4 expected for spatial and 1 expected for temporal downsampling with conv." ) self.convnet = Conv2DDownsample( in_channels=in_channels, num_layers=int(convnet_num_layers), out_channels=out_channels, use_batchnorm=conv2d_use_batchnorm, ) elif self.prep_type == "conv1x1": if temporal_downsample != 1: raise ValueError("Conv1x1 does not downsample in time.") self.convnet_1x1 = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1), # spatial_downsample is unconstrained for 1x1 convolutions. 
stride=(spatial_downsample, spatial_downsample), ) # Position embeddings self.project_pos_dim = project_pos_dim self.position_embeddings, self.positions_projection = build_position_encoding( position_encoding_type=position_encoding_type, out_channels=out_channels, project_pos_dim=project_pos_dim, **position_encoding_kwargs, ) # Optional convolutional layer after patches. self.conv_after_patches = ( nn.Linear(conv_after_patching_in_channels, self.out_channels) if conv_after_patching else nn.Identity() ) @property def num_channels(self) -> int: # Let's assume that the number of resolutions (in the context of image preprocessing) # of the input data is 2 or 3 depending on whether we are processing image or video respectively. # In this case, for convenience, we will declare is_temporal variable, # which will show whether the data has a temporal dimension or not. is_temporal = self.position_embeddings.num_dimensions > 2 # position embedding if self.project_pos_dim > 0: pos_dim = self.project_pos_dim else: pos_dim = self.position_embeddings.output_size() if self.concat_or_add_pos == "add": return pos_dim # inputs if self.conv_after_patching or self.prep_type in ("conv1x1", "conv"): inp_dim = self.out_channels elif self.prep_type == "pixels": inp_dim = self.in_channels if not is_temporal: inp_dim = math.ceil(inp_dim / self.spatial_downsample) elif self.prep_type == "patches": if self.conv_after_patching: inp_dim = self.out_channels else: inp_dim = self.in_channels * self.spatial_downsample**2 if is_temporal: inp_dim *= self.temporal_downsample return inp_dim + pos_dim def _build_network_inputs(self, inputs: torch.Tensor, network_input_is_1d: bool = True): """ Construct the final input, including position encoding. This method expects the inputs to always have channels as last dimension. """ batch_size = inputs.shape[0] index_dims = inputs.shape[1:-1] indices = np.prod(index_dims) # Flatten input features to a 1D index dimension if necessary. if len(inputs.shape) > 3 and network_input_is_1d: inputs = torch.reshape(inputs, [batch_size, indices, -1]) # Construct the position encoding. if self.position_encoding_type == "trainable": pos_enc = self.position_embeddings(batch_size) elif self.position_encoding_type == "fourier": pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype) # Optionally project them to a target dimension. pos_enc = self.positions_projection(pos_enc) if not network_input_is_1d: # Reshape pos to match the input feature shape # if the network takes non-1D inputs sh = inputs.shape pos_enc = torch.reshape(pos_enc, list(sh)[:-1] + [-1]) if self.concat_or_add_pos == "concat": inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1) elif self.concat_or_add_pos == "add": inputs_with_pos = inputs + pos_enc return inputs_with_pos, inputs def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True): if self.prep_type == "conv": # Convnet image featurization. 
# Downsamples spatially by a factor of 4 inputs = self.convnet(inputs) elif self.prep_type == "conv1x1": # map inputs to self.out_channels inputs = self.convnet_1x1(inputs) elif self.prep_type == "pixels": # if requested, downsamples in the crudest way if inputs.ndim == 4: inputs = inputs[:: self.spatial_downsample, :: self.spatial_downsample] elif inputs.ndim == 5: inputs = inputs[ :, :: self.temporal_downsample, :, :: self.spatial_downsample, :: self.spatial_downsample ] else: raise ValueError("Unsupported data format for pixels.") elif self.prep_type == "patches": # Space2depth featurization. # Video: B x T x C x H x W inputs = space_to_depth( inputs, temporal_block_size=self.temporal_downsample, spatial_block_size=self.spatial_downsample ) if inputs.ndim == 5 and inputs.shape[1] == 1: # for flow inputs = inputs.squeeze(dim=1) # Optionally apply conv layer. inputs = self.conv_after_patches(inputs) if self.prep_type != "patches": # move channels to last dimension, as the _build_network_inputs method below expects this if inputs.ndim == 4: inputs = inputs.permute(0, 2, 3, 1) elif inputs.ndim == 5: inputs = inputs.permute(0, 1, 3, 4, 2) else: raise ValueError("Unsupported data format for conv1x1.") inputs, inputs_without_pos = self._build_network_inputs(inputs, network_input_is_1d) modality_sizes = None # Size for each modality, only needed for multimodal return inputs, modality_sizes, inputs_without_pos class PerceiverOneHotPreprocessor(AbstractPreprocessor): """ One-hot preprocessor for Perceiver Encoder. Can be used to add a dummy index dimension to the input. Args: config ([`PerceiverConfig`]): Model configuration. """ def __init__(self, config: PerceiverConfig) -> None: super().__init__() self.config: PerceiverConfig = config @property def num_channels(self) -> int: return self.config.num_labels def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True): # Add a dummy index dimension. inputs = inputs[:, None, :] # No position encodings, so the 1st (input) and 3rd (inputs_without_pos) # outputs are identical. return inputs, None, inputs class PerceiverAudioPreprocessor(AbstractPreprocessor): """ Audio preprocessing for Perceiver Encoder. Args: config ([*PerceiverConfig*]): Model configuration. prep_type (`str`, *optional*, defaults to `"patches"`): Preprocessor type to use. Only "patches" is supported. samples_per_patch (`int`, *optional*, defaults to 96): Number of samples per patch. position_encoding_type (`str`, *optional*, defaults to `"fourier"`): Type of position encoding to use. Can be "trainable" or "fourier". concat_or_add_pos (`str`, *optional*, defaults to `"concat"`): How to concatenate the position encoding to the input. Can be "concat" or "add". out_channels (`int`, *optional*, defaults to 64): Number of channels in the output. project_pos_dim (`int`, *optional*, defaults to -1): Dimension of the position encoding to project to. If -1, no projection is applied. **position_encoding_kwargs (`Dict`, *optional*): Keyword arguments for the position encoding. 
""" def __init__( self, config, prep_type: str = "patches", samples_per_patch: int = 96, position_encoding_type: str = "fourier", concat_or_add_pos: str = "concat", out_channels=64, project_pos_dim=-1, **position_encoding_kwargs, ): super().__init__() self.config = config if prep_type not in ("patches",): raise ValueError(f"Prep_type {prep_type} is invalid, can only be 'patches'.") if concat_or_add_pos not in ["concat", "add"]: raise ValueError(f"Concat_or_pos {concat_or_add_pos} is invalid, can only be 'concat' or 'add'.") self.samples_per_patch = samples_per_patch self.position_encoding_type = position_encoding_type self.concat_or_add_pos = concat_or_add_pos self.project_pos_dim = project_pos_dim # Position embeddings self.position_embeddings, self.positions_projection = build_position_encoding( position_encoding_type=position_encoding_type, out_channels=out_channels, project_pos_dim=project_pos_dim, **position_encoding_kwargs, ) @property def num_channels(self) -> int: # position embedding if self.project_pos_dim > 0: pos_dim = self.project_pos_dim else: pos_dim = self.position_embeddings.output_size() if self.concat_or_add_pos == "add": return pos_dim return self.samples_per_patch + pos_dim def _build_network_inputs(self, inputs): """Construct the final input, including position encoding.""" batch_size = inputs.shape[0] index_dims = inputs.shape[1:-1] # Construct the position encoding. if self.position_encoding_type == "trainable": pos_enc = self.position_embeddings(batch_size) elif self.position_encoding_type == "fourier": pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype) # Optionally project them to a target dimension. pos_enc = self.positions_projection(pos_enc) if self.concat_or_add_pos == "concat": inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1) elif self.concat_or_add_pos == "add": inputs_with_pos = inputs + pos_enc return inputs_with_pos, inputs def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True): inputs = torch.reshape(inputs, [inputs.shape[0], -1, self.samples_per_patch]) inputs, inputs_without_pos = self._build_network_inputs(inputs) modality_sizes = None # Size for each modality, only needed for multimodal return inputs, modality_sizes, inputs_without_pos class PerceiverMultimodalPreprocessor(AbstractPreprocessor): """ Multimodal preprocessing for Perceiver Encoder. Inputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number of channels. Args: modalities (`Mapping[str, PreprocessorType]`): Dict mapping modality name to preprocessor. mask_probs (`Dict[str, float]`): Dict mapping modality name to masking probability of that modality. min_padding_size (`int`, *optional*, defaults to 2): The minimum padding size for all modalities. The final output will have num_channels equal to the maximum channels across all modalities plus min_padding_size. 
""" def __init__( self, modalities: Mapping[str, PreprocessorType], mask_probs: Optional[Mapping[str, float]] = None, min_padding_size: int = 2, ): super().__init__() self.modalities = nn.ModuleDict(modalities) self.min_padding_size = min_padding_size self.mask_probs = mask_probs if mask_probs is not None else {} self.padding = nn.ParameterDict( { modality: nn.Parameter(torch.randn(1, self.num_channels - preprocessor.num_channels)) for modality, preprocessor in modalities.items() } ) self.mask = nn.ParameterDict( {modality: nn.Parameter(torch.randn(1, self.num_channels)) for modality, _ in self.mask_probs.items()} ) @property def num_channels(self) -> int: max_channel_size = max(processor.num_channels for _, processor in self.modalities.items()) common_channel_size = max_channel_size + self.min_padding_size return common_channel_size def forward( self, inputs: Mapping[str, torch.Tensor], pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True ) -> PreprocessorOutputType: padded = {} modality_sizes = {} inputs_without_pos = {} for modality, preprocessor in self.modalities.items(): # preprocess each modality using the respective preprocessor. output, _, inputs_without_pos[modality] = preprocessor( inputs[modality], pos=pos, network_input_is_1d=network_input_is_1d ) # pad to the same common_channel_size. batch_size, num_samples, num_channels = output.shape pos_enc = self.padding[modality].expand(batch_size, -1, -1) padding = torch.broadcast_to( pos_enc, [batch_size, num_samples, self.num_channels - num_channels], ) output_padded = torch.cat([output, padding], dim=2) # mask if required if modality in self.mask_probs: mask_token = self.mask[modality].expand(batch_size, -1, -1) mask_prob = self.mask_probs[modality] mask = torch.bernoulli(torch.full([batch_size, num_samples], mask_prob)) mask = torch.unsqueeze(mask, dim=2).to(mask_token.device) output_padded = (1 - mask) * output_padded + mask * mask_token padded[modality] = output_padded modality_sizes[modality] = output_padded.shape[1] # Apply a predictable ordering to the modalities padded_ls = [padded[k] for k in sorted(padded.keys())] # Finally, concatenate along the time dimension final_inputs = torch.cat(padded_ls, dim=1) return final_inputs, modality_sizes, inputs_without_pos
# (end of source file: transformers/src/transformers/models/perceiver/modeling_perceiver.py)
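# ---------------------------------------------------------------------------
# Reference note, not part of the upstream sources: the Pix2Struct modeling file
# below defines Pix2StructLayerNorm, a T5-style RMS norm that only rescales by
# the root mean square of the features (no mean subtraction and no bias), with
# the variance accumulated in fp32. A bare functional sketch of that
# computation, using illustrative names, would be:
#
#     def rms_norm_reference(hidden_states, weight, eps=1e-6):
#         variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
#         return weight * (hidden_states * torch.rsqrt(variance + eps))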
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. & Google team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Pix2Struct modeling file""" import math from typing import Dict, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import ALL_LAYERNORM_LAYERS from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_pix2struct import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "Pix2StructConfig" PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/pix2struct-textcaps-base", "google/pix2struct-textcaps-large", "google/pix2struct-base", "google/pix2struct-large", "google/pix2struct-ai2d-base", "google/pix2struct-ai2d-large", "google/pix2struct-widget-captioning-base", "google/pix2struct-widget-captioning-large", "google/pix2struct-screen2words-base", "google/pix2struct-screen2words-large", "google/pix2struct-docvqa-base", "google/pix2struct-docvqa-large", "google/pix2struct-ocrvqa-base", "google/pix2struct-ocrvqa-large", "google/pix2struct-chartqa-base", "google/pix2struct-inforgraphics-vqa-base", "google/pix2struct-inforgraphics-vqa-large", # See all Pix2StructVision models at https://huggingface.co/models?filter=pix2struct ] # Adapted from transformers.models.t5.modeling_t5.T5LayerNorm with T5->Pix2Struct class Pix2StructLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the T5 style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias. 
Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm Pix2StructLayerNorm = FusedRMSNorm # noqa logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of Pix2StructLayerNorm") except ImportError: # using the normal Pix2StructLayerNorm pass except Exception: logger.warning("Discovered apex but it failed to load, falling back to Pix2StructLayerNorm") pass ALL_LAYERNORM_LAYERS.append(Pix2StructLayerNorm) class Pix2StructVisionEmbeddings(nn.Module): r""" Construct the embeddings from patch. In `Pix2Struct` the input is different from classic Vision-transformer models. Here the input is a sequence of `seq_len` flattened patches that also combines padding patches (tokens). Each patch is represented by a vector of `hidden_size` values. """ def __init__(self, config: Pix2StructConfig) -> None: super().__init__() self.patch_projection = nn.Linear(config.patch_embed_hidden_size, config.hidden_size) self.row_embedder = nn.Embedding(config.seq_len, config.hidden_size) self.column_embedder = nn.Embedding(config.seq_len, config.hidden_size) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, flattened_patches: torch.Tensor) -> torch.Tensor: # the row and column indices are stored in the first and second position of the flattened_patches # flattened_patches: `batch_size`, `seq_len`, `hidden_size` + 2 row_indices = flattened_patches[:, :, 0].long() col_indices = flattened_patches[:, :, 1].long() flattened_patches = flattened_patches[:, :, 2:] embeddings = self.patch_projection(flattened_patches) row_embeddings = self.row_embedder(row_indices) col_embeddings = self.column_embedder(col_indices) # sum all embeddings together embeddings = embeddings + row_embeddings + col_embeddings embeddings = self.dropout(embeddings) return embeddings class Pix2StructVisionAttention(nn.Module): def __init__(self, config): super().__init__() self.hidden_size = config.hidden_size self.key_value_proj_dim = config.d_kv self.n_heads = config.num_attention_heads self.dropout = config.attention_dropout self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.query = nn.Linear(self.hidden_size, self.inner_dim, bias=False) self.key = nn.Linear(self.hidden_size, self.inner_dim, bias=False) self.value = nn.Linear(self.hidden_size, self.inner_dim, bias=False) self.output = nn.Linear(self.inner_dim, self.hidden_size, bias=False) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): """ Self-attention block """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] def to_projection_shape(states): """projection""" return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) # get query states # (batch_size, n_heads, seq_length, dim_per_head) query_states = 
to_projection_shape(self.query(hidden_states)) # get key/value states key_states = to_projection_shape(self.key(hidden_states)) value_states = to_projection_shape(self.value(hidden_states)) # compute scores # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: position_bias = torch.zeros( (1, self.n_heads, seq_length, seq_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=scores.device, dtype=scores.dtype) if attention_mask.dim() == 2: position_bias = position_bias + attention_mask[:, None, None, :].to(position_bias.device) else: # (batch_size, n_heads, seq_length, key_length) position_bias = position_bias + attention_mask.to(position_bias.device) position_bias = 1 - position_bias position_bias_masked = position_bias.masked_fill(position_bias == 1, torch.finfo(scores.dtype).min) scores += position_bias_masked scores = torch.max(scores, torch.tensor(torch.finfo(scores.dtype).min)) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).type_as(scores) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) # (batch_size, seq_length, dim) attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) attn_output = self.output(attn_output) outputs = (attn_output,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5DenseGatedActDense->Pix2StructVisionMlp,T5Config->Pix2StructVisionConfig,config.d_model->config.hidden_size,dropout_rate->dropout_rate class Pix2StructVisionMlp(nn.Module): def __init__(self, config: Pix2StructVisionConfig): super().__init__() self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. 
# See https://github.com/huggingface/transformers/issues/20287 # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None`` if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states class Pix2StructVisionLayer(nn.Module): def __init__(self, config: Pix2StructConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = Pix2StructVisionAttention(config) self.mlp = Pix2StructVisionMlp(config) self.pre_mlp_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pre_attention_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: residual = hidden_states # in Pix2StructVision, layernorm is applied before self-attention hidden_states = self.pre_attention_layer_norm(hidden_states) self_attention_outputs = self.attention( hidden_states, attention_mask=attention_mask, layer_head_mask=head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + residual # in Pix2StructVision, layernorm is also applied after self-attention layer_output = self.pre_mlp_layer_norm(hidden_states) layer_output = self.mlp(layer_output) + hidden_states # second residual connection outputs = (layer_output,) + outputs return outputs class Pix2StructVisionEncoder(nn.Module): def __init__(self, config: Pix2StructConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([Pix2StructVisionLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class Pix2StructPreTrainedModel(PreTrainedModel): """ 
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Pix2StructConfig @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, Pix2StructLayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, Pix2StructTextDenseGatedActDense): hidden_size = ( self.config.text_config.hidden_size if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size ) d_ff = self.config.text_config.d_ff if isinstance(self.config, Pix2StructConfig) else self.config.d_ff module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, Pix2StructTextAttention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 hidden_size = ( self.config.text_config.hidden_size if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size ) key_value_proj_dim = ( self.config.text_config.d_kv if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size ) n_heads = ( self.config.text_config.num_heads if isinstance(self.config, Pix2StructConfig) else self.config.num_heads ) module.query.weight.data.normal_(mean=0.0, std=factor * ((hidden_size * key_value_proj_dim) ** -0.5)) module.key.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5)) module.value.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5)) module.output.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5)) elif isinstance(module, nn.Embedding): hidden_size = ( self.config.text_config.hidden_size if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size ) module.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5)) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, Pix2StructTextModel): hidden_size = ( self.config.text_config.hidden_size if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size ) module.lm_head.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5)) elif isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, 
Pix2StructLayerNorm): if module.weight is not None: module.weight.data.fill_(1.0) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->Pix2Struct def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In Pix2Struct it is usually set to the pad_token_id. " "See Pix2Struct docs for more information." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids PIX2STRUCT_VISION_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`Pix2StructConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ PIX2STRUCT_VISION_INPUTS_DOCSTRING = r""" Args: flattened_patches (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_channels x patch_height x patch_width)`): Flattened and padded pixel values. These values can be obtained using [`AutoImageProcessor`]. See [`Pix2StructVisionImageProcessor.__call__`] for details. Check the [original paper](https://arxiv.org/abs/2210.03347) (figure 5) for more details. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare Pix2StructVision Model transformer outputting raw hidden-states without any specific head on top.", PIX2STRUCT_VISION_START_DOCSTRING, ) class Pix2StructVisionModel(Pix2StructPreTrainedModel): config_class = Pix2StructVisionConfig main_input_name = "flattened_patches" supports_gradient_checkpointing = True _no_split_modules = ["Pix2StructVisionLayer"] def __init__(self, config: Pix2StructConfig): super().__init__(config) self.config = config self.embeddings = Pix2StructVisionEmbeddings(config) self.encoder = Pix2StructVisionEncoder(config) self.layernorm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_projection def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(PIX2STRUCT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, flattened_patches: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Example: ```python >>> import requests >>> from PIL import Image >>> from transformers import AutoProcessor, Pix2StructVisionModel >>> image_processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base") >>> model = Pix2StructVisionModel.from_pretrained("google/pix2struct-textcaps-base") >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 2048, 768] ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if flattened_patches is None: raise ValueError("You have to specify flattened_patches") if attention_mask is None: # check where `flattened_patches` is not 0 attention_mask = (flattened_patches.sum(dim=-1) != 0).float() # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(flattened_patches) encoder_outputs = self.encoder( embedding_output, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) if not return_dict: head_outputs = (sequence_output,) return head_outputs + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->Pix2StructText,d_model->hidden_size class Pix2StructTextDenseGatedActDense(nn.Module): def __init__(self, config: Pix2StructTextConfig): super().__init__() self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. 
# See https://github.com/huggingface/transformers/issues/20287 # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None`` if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states class Pix2StructTextLayerFF(nn.Module): def __init__(self, config: Pix2StructTextConfig): super().__init__() self.DenseReluDense = Pix2StructTextDenseGatedActDense(config) self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Copied from transformers.models.t5.modeling_t5.T5LayerFF.forward def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class Pix2StructTextAttention(nn.Module): def __init__(self, config: Pix2StructTextConfig, has_relative_attention_bias=False): super().__init__() self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.hidden_size = config.hidden_size self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.query = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.key = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.value = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.output = nn.Linear(self.hidden_size, self.hidden_size, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets # Adapted from transformers.models.t5.modeling_t5.T5Attention.compute_bias def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=False, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: if len(past_key_value) != 2: raise ValueError( f"past_key_value should have 2 past states: keys and values. 
Got { len(past_key_value)} past states" ) real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def to_projection_shape(states): """projection""" return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = to_projection_shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = to_projection_shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: # checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = to_projection_shape(proj_layer(key_value_states)) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states # (batch_size, n_heads, seq_length, dim_per_head) query_states = to_projection_shape(self.query(hidden_states)) # get key/value states key_states = project( hidden_states, self.key, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.value, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) # (batch_size, seq_length, dim) attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) attn_output = self.output(attn_output) present_key_value_state = (key_states, value_states) if use_cache else None outputs = 
(attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.SelfAttention->self.attention,config.d_model->config.hidden_size class Pix2StructTextLayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.attention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.EncDecAttention->self.attention,config.d_model->config.hidden_size class Pix2StructTextLayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=False) self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.attention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class Pix2StructTextBlock(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.self_attention = Pix2StructTextLayerSelfAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.encoder_decoder_attention = Pix2StructTextLayerCrossAttention(config) self.mlp = Pix2StructTextLayerFF(config) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. 
' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.self_attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.encoder_decoder_attention( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.mlp(hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs PIX2STRUCT_START_DOCSTRING = r""" The Pix2Struct model was proposed in [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. It's an encoder decoder transformer pre-trained in a image-to-text setting. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (Union[`Pix2StructConfig`, `Pix2StructTextConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ PIX2STRUCT_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Pix2StructText is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [Pix2StructText Training](./t5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ PIX2STRUCT_INPUTS_DOCSTRING = r""" Args: flattened_patches (`torch.FloatTensor` of shape `(batch_size, seq_length, hidden_size)`): Flattened pixel patches. the `hidden_size` is obtained by the following formula: `hidden_size` = `num_channels` * `patch_size` * `patch_size` The process of flattening the pixel patches is done by `Pix2StructProcessor`. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText Training](./t5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss for the decoder. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The standalone text decoder of Pix2Struct", PIX2STRUCT_START_DOCSTRING, ) class Pix2StructTextModel(Pix2StructPreTrainedModel): config_class = Pix2StructTextConfig _no_split_modules = ["Pix2StructTextBlock"] _tied_weights_keys = ["lm_head.weight"] supports_gradient_checkpointing = True def __init__(self, config): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size) self.layer = nn.ModuleList( [Pix2StructTextBlock(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._reorder_cache def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) if reordered_layer_past_states[0].shape != layer_past_states[0].shape: raise ValueError( f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched" ) if len(reordered_layer_past_states) != len(layer_past_states): raise ValueError( f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched" ) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(PIX2STRUCT_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.FloatTensor, ...], CausalLMOutputWithCrossAttentions]: r""" Returns: Example: ```python >>> from transformers import 
AutoProcessor, Pix2StructTextModel >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base") >>> model = Pix2StructTextModel.from_pretrained("google/pix2struct-textcaps-base") >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> loss = outputs.loss ``` """ use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if inputs_embeds is None: assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.layer) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.layer, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False layer_outputs = self._gradient_checkpointing_func( layer_module.forward, hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = 
self.dropout(hidden_states) logits = self.lm_head(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) loss_fct = nn.CrossEntropyLoss(ignore_index=-100, reduction="mean") loss = loss_fct(logits.contiguous().view(-1, logits.size(-1)), labels.contiguous().view(-1)) if not return_dict: return tuple( v for v in [ loss, logits, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "A conditional generation model with a language modeling head. Can be used for sequence generation tasks.", PIX2STRUCT_START_DOCSTRING, ) class Pix2StructForConditionalGeneration(Pix2StructPreTrainedModel): config_class = Pix2StructConfig main_input_name = "flattened_patches" _tied_weights_keys = ["decoder.lm_head.weight"] def __init__(self, config: Pix2StructConfig): super().__init__(config) self.encoder = Pix2StructVisionModel(config.vision_config) self.decoder = Pix2StructTextModel(config.text_config) self.is_vqa = config.is_vqa # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.decoder.get_input_embeddings() def set_input_embeddings(self, new_embeddings): self.decoder.set_input_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.decoder.set_output_embeddings(new_embeddings) def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding: model_embeds = self.decoder.resize_token_embeddings(new_num_tokens) # update vocab size self.config.text_config.vocab_size = new_num_tokens return model_embeds def get_decoder(self): return self.decoder def get_encoder(self): return self.encoder @add_start_docstrings_to_model_forward(PIX2STRUCT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, flattened_patches: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, labels: Optional[torch.LongTensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" Returns: Example: Inference: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base") >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base") >>> url = 
"https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> # autoregressive generation >>> generated_ids = model.generate(**inputs, max_new_tokens=50) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_text) A stop sign is on a street corner. >>> # conditional generation >>> text = "A picture of" >>> inputs = processor(text=text, images=image, return_tensors="pt", add_special_tokens=False) >>> generated_ids = model.generate(**inputs, max_new_tokens=50) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_text) A picture of a stop sign with a red stop sign ``` Training: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("google/pix2struct-base") >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-base") >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "A stop sign is on the street corner." >>> inputs = processor(images=image, return_tensors="pt") >>> labels = processor(text=text, return_tensors="pt").input_ids >>> # forward pass >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> print(f"{loss.item():.5f}") 5.94282 ```""" use_cache = use_cache if use_cache is not None else self.config.text_config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( flattened_patches=flattened_patches, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) decoder_attention_mask = ( decoder_attention_mask if decoder_attention_mask is not None else decoder_input_ids.ne(self.config.pad_token_id).float() ) # Always attend to the first token decoder_attention_mask[:, 0] = 1 # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, labels=labels, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqLMOutput( loss=decoder_outputs.loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, 
encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, flattened_patches: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): if decoder_attention_mask is None: decoder_attention_mask = torch.ones_like(input_ids).to(input_ids.device) # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return { "flattened_patches": flattened_patches, "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, }
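# Illustrative sketch (not part of the original file): a self-contained re-implementation of the
# T5-style relative position bucketing that `Pix2StructTextAttention._relative_position_bucket`
# applies above, for the causal case (bidirectional=False). The bucket counts used here
# (32 buckets, max_distance=128) are the usual T5 defaults and are assumed purely for
# illustration; the real values come from the model config
# (`relative_attention_num_buckets`, `relative_attention_max_distance`).
import math


def relative_position_to_bucket(relative_position: int, num_buckets: int = 32, max_distance: int = 128) -> int:
    # In causal self-attention, memory_position - context_position is never positive,
    # so map it to a non-negative distance first.
    distance = max(-relative_position, 0)
    max_exact = num_buckets // 2
    if distance < max_exact:
        # The first half of the buckets stores exact offsets.
        return distance
    # The second half covers larger offsets logarithmically, capped at the last bucket.
    bucket = max_exact + int(
        math.log(distance / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
    )
    return min(bucket, num_buckets - 1)


for rel in (0, -3, -15, -16, -64, -500):
    print(rel, "->", relative_position_to_bucket(rel))
# 0 -> 0, -3 -> 3, -15 -> 15, -16 -> 16, -64 -> 26, -500 -> 31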
transformers/src/transformers/models/pix2struct/modeling_pix2struct.py
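# Illustrative sketch (not part of the original file): a plain-torch version of what
# `Pix2StructForConditionalGeneration.forward` above does when only `labels` are passed --
# shift the labels right to build `decoder_input_ids` (via the inherited `_shift_right`)
# and derive `decoder_attention_mask` from the padding tokens. The token ids and
# `pad_token_id = 0` below are assumptions chosen for illustration only.
import torch


def shift_labels_right(labels: torch.LongTensor, pad_token_id: int = 0, decoder_start_token_id: int = 0):
    decoder_input_ids = labels.new_zeros(labels.shape)
    decoder_input_ids[:, 1:] = labels[:, :-1].clone()
    decoder_input_ids[:, 0] = decoder_start_token_id
    # -100 is the loss ignore index; it must not reach the embedding layer.
    decoder_input_ids.masked_fill_(decoder_input_ids == -100, pad_token_id)
    return decoder_input_ids


labels = torch.tensor([[42, 43, 44, 1, -100, -100]])
decoder_input_ids = shift_labels_right(labels)
decoder_attention_mask = decoder_input_ids.ne(0).float()
decoder_attention_mask[:, 0] = 1  # the first (start) token is always attended to
print(decoder_input_ids)       # tensor([[ 0, 42, 43, 44,  1,  0]])
print(decoder_attention_mask)  # tensor([[1., 1., 1., 1., 1., 0.]])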
{ "file_path": "transformers/src/transformers/models/pix2struct/modeling_pix2struct.py", "repo_id": "transformers", "token_count": 35764 }
354
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Feature extractor class for Pop2Piano""" import warnings from typing import List, Optional, Union import numpy import numpy as np from ...audio_utils import mel_filter_bank, spectrogram from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import ( TensorType, is_essentia_available, is_librosa_available, is_scipy_available, logging, requires_backends, ) if is_essentia_available(): import essentia import essentia.standard if is_librosa_available(): import librosa if is_scipy_available(): import scipy logger = logging.get_logger(__name__) class Pop2PianoFeatureExtractor(SequenceFeatureExtractor): r""" Constructs a Pop2Piano feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts rhythm and preprocesses the audio before it is passed to the model. First the audio is passed to `RhythmExtractor2013` algorithm which extracts the beat_times, beat positions and estimates their confidence as well as tempo in bpm, then beat_times is interpolated and to get beatsteps. Later we calculate extrapolated_beatsteps from it to be used in tokenizer. On the other hand audio is resampled to self.sampling_rate and preprocessed and then log mel spectogram is computed from that to be used in our transformer model. Args: sampling_rate (`int`, *optional*, defaults to 22050): Target Sampling rate of audio signal. It's the sampling rate that we forward to the model. padding_value (`int`, *optional*, defaults to 0): Padding value used to pad the audio. Should correspond to silences. window_size (`int`, *optional*, defaults to 4096): Length of the window in samples to which the Fourier transform is applied. hop_length (`int`, *optional*, defaults to 1024): Step size between each window of the waveform, in samples. min_frequency (`float`, *optional*, defaults to 10.0): Lowest frequency that will be used in the log-mel spectrogram. feature_size (`int`, *optional*, defaults to 512): The feature dimension of the extracted features. num_bars (`int`, *optional*, defaults to 2): Determines interval between each sequence. 
""" model_input_names = ["input_features", "beatsteps", "extrapolated_beatstep"] def __init__( self, sampling_rate: int = 22050, padding_value: int = 0, window_size: int = 4096, hop_length: int = 1024, min_frequency: float = 10.0, feature_size: int = 512, num_bars: int = 2, **kwargs, ): super().__init__( feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, ) self.sampling_rate = sampling_rate self.padding_value = padding_value self.window_size = window_size self.hop_length = hop_length self.min_frequency = min_frequency self.feature_size = feature_size self.num_bars = num_bars self.mel_filters = mel_filter_bank( num_frequency_bins=(self.window_size // 2) + 1, num_mel_filters=self.feature_size, min_frequency=self.min_frequency, max_frequency=float(self.sampling_rate // 2), sampling_rate=self.sampling_rate, norm=None, mel_scale="htk", ) def mel_spectrogram(self, sequence: np.ndarray): """ Generates MelSpectrogram. Args: sequence (`numpy.ndarray`): The sequence of which the mel-spectrogram will be computed. """ mel_specs = [] for seq in sequence: window = np.hanning(self.window_size + 1)[:-1] mel_specs.append( spectrogram( waveform=seq, window=window, frame_length=self.window_size, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, ) ) mel_specs = np.array(mel_specs) return mel_specs def extract_rhythm(self, audio: np.ndarray): """ This algorithm(`RhythmExtractor2013`) extracts the beat positions and estimates their confidence as well as tempo in bpm for an audio signal. For more information please visit https://essentia.upf.edu/reference/std_RhythmExtractor2013.html . Args: audio(`numpy.ndarray`): raw audio waveform which is passed to the Rhythm Extractor. """ requires_backends(self, ["essentia"]) essentia_tracker = essentia.standard.RhythmExtractor2013(method="multifeature") bpm, beat_times, confidence, estimates, essentia_beat_intervals = essentia_tracker(audio) return bpm, beat_times, confidence, estimates, essentia_beat_intervals def interpolate_beat_times( self, beat_times: numpy.ndarray, steps_per_beat: numpy.ndarray, n_extend: numpy.ndarray ): """ This method takes beat_times and then interpolates that using `scipy.interpolate.interp1d` and the output is then used to convert raw audio to log-mel-spectrogram. Args: beat_times (`numpy.ndarray`): beat_times is passed into `scipy.interpolate.interp1d` for processing. steps_per_beat (`int`): used as an parameter to control the interpolation. n_extend (`int`): used as an parameter to control the interpolation. """ requires_backends(self, ["scipy"]) beat_times_function = scipy.interpolate.interp1d( np.arange(beat_times.size), beat_times, bounds_error=False, fill_value="extrapolate", ) ext_beats = beat_times_function( np.linspace(0, beat_times.size + n_extend - 1, beat_times.size * steps_per_beat + n_extend) ) return ext_beats def preprocess_mel(self, audio: np.ndarray, beatstep: np.ndarray): """ Preprocessing for log-mel-spectrogram Args: audio (`numpy.ndarray` of shape `(audio_length, )` ): Raw audio waveform to be processed. beatstep (`numpy.ndarray`): Interpolated values of the raw audio. If beatstep[0] is greater than 0.0, then it will be shifted by the value at beatstep[0]. """ if audio is not None and len(audio.shape) != 1: raise ValueError( f"Expected `audio` to be a single channel audio input of shape `(n, )` but found shape {audio.shape}." 
) if beatstep[0] > 0.0: beatstep = beatstep - beatstep[0] num_steps = self.num_bars * 4 num_target_steps = len(beatstep) extrapolated_beatstep = self.interpolate_beat_times( beat_times=beatstep, steps_per_beat=1, n_extend=(self.num_bars + 1) * 4 + 1 ) sample_indices = [] max_feature_length = 0 for i in range(0, num_target_steps, num_steps): start_idx = i end_idx = min(i + num_steps, num_target_steps) start_sample = int(extrapolated_beatstep[start_idx] * self.sampling_rate) end_sample = int(extrapolated_beatstep[end_idx] * self.sampling_rate) sample_indices.append((start_sample, end_sample)) max_feature_length = max(max_feature_length, end_sample - start_sample) padded_batch = [] for start_sample, end_sample in sample_indices: feature = audio[start_sample:end_sample] padded_feature = np.pad( feature, ((0, max_feature_length - feature.shape[0]),), "constant", constant_values=0, ) padded_batch.append(padded_feature) padded_batch = np.asarray(padded_batch) return padded_batch, extrapolated_beatstep def _pad(self, features: np.ndarray, add_zero_line=True): features_shapes = [each_feature.shape for each_feature in features] attention_masks, padded_features = [], [] for i, each_feature in enumerate(features): # To pad "input_features". if len(each_feature.shape) == 3: features_pad_value = max([*zip(*features_shapes)][1]) - features_shapes[i][1] attention_mask = np.ones(features_shapes[i][:2], dtype=np.int64) feature_padding = ((0, 0), (0, features_pad_value), (0, 0)) attention_mask_padding = (feature_padding[0], feature_padding[1]) # To pad "beatsteps" and "extrapolated_beatstep". else: each_feature = each_feature.reshape(1, -1) features_pad_value = max([*zip(*features_shapes)][0]) - features_shapes[i][0] attention_mask = np.ones(features_shapes[i], dtype=np.int64).reshape(1, -1) feature_padding = attention_mask_padding = ((0, 0), (0, features_pad_value)) each_padded_feature = np.pad(each_feature, feature_padding, "constant", constant_values=self.padding_value) attention_mask = np.pad( attention_mask, attention_mask_padding, "constant", constant_values=self.padding_value ) if add_zero_line: # if it is batched then we seperate each examples using zero array zero_array_len = max([*zip(*features_shapes)][1]) # we concatenate the zero array line here each_padded_feature = np.concatenate( [each_padded_feature, np.zeros([1, zero_array_len, self.feature_size])], axis=0 ) attention_mask = np.concatenate( [attention_mask, np.zeros([1, zero_array_len], dtype=attention_mask.dtype)], axis=0 ) padded_features.append(each_padded_feature) attention_masks.append(attention_mask) padded_features = np.concatenate(padded_features, axis=0).astype(np.float32) attention_masks = np.concatenate(attention_masks, axis=0).astype(np.int64) return padded_features, attention_masks def pad( self, inputs: BatchFeature, is_batched: bool, return_attention_mask: bool, return_tensors: Optional[Union[str, TensorType]] = None, ): """ Pads the inputs to same length and returns attention_mask. Args: inputs (`BatchFeature`): Processed audio features. is_batched (`bool`): Whether inputs are batched or not. return_attention_mask (`bool`): Whether to return attention mask or not. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. If nothing is specified, it will return list of `np.ndarray` arrays. 
Return: `BatchFeature` with attention_mask, attention_mask_beatsteps and attention_mask_extrapolated_beatstep added to it: - **attention_mask** numpy.ndarray of shape `(batch_size, max_input_features_seq_length)` -- Example : 1, 1, 1, 0, 0 (audio 1, also here it is padded to max length of 5 thats why there are 2 zeros at the end indicating they are padded) 0, 0, 0, 0, 0 (zero pad to seperate audio 1 and 2) 1, 1, 1, 1, 1 (audio 2) 0, 0, 0, 0, 0 (zero pad to seperate audio 2 and 3) 1, 1, 1, 1, 1 (audio 3) - **attention_mask_beatsteps** numpy.ndarray of shape `(batch_size, max_beatsteps_seq_length)` - **attention_mask_extrapolated_beatstep** numpy.ndarray of shape `(batch_size, max_extrapolated_beatstep_seq_length)` """ processed_features_dict = {} for feature_name, feature_value in inputs.items(): if feature_name == "input_features": padded_feature_values, attention_mask = self._pad(feature_value, add_zero_line=True) processed_features_dict[feature_name] = padded_feature_values if return_attention_mask: processed_features_dict["attention_mask"] = attention_mask else: padded_feature_values, attention_mask = self._pad(feature_value, add_zero_line=False) processed_features_dict[feature_name] = padded_feature_values if return_attention_mask: processed_features_dict[f"attention_mask_{feature_name}"] = attention_mask # If we are processing only one example, we should remove the zero array line since we don't need it to # seperate examples from each other. if not is_batched and not return_attention_mask: processed_features_dict["input_features"] = processed_features_dict["input_features"][:-1, ...] outputs = BatchFeature(processed_features_dict, tensor_type=return_tensors) return outputs def __call__( self, audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], sampling_rate: Union[int, List[int]], steps_per_beat: int = 2, resample: Optional[bool] = True, return_attention_mask: Optional[bool] = False, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to featurize and prepare for the model. Args: audio (`np.ndarray`, `List`): The audio or batch of audio to be processed. Each audio can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. sampling_rate (`int`): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. steps_per_beat (`int`, *optional*, defaults to 2): This is used in interpolating `beat_times`. resample (`bool`, *optional*, defaults to `True`): Determines whether to resample the audio to `sampling_rate` or not before processing. Must be True during inference. return_attention_mask (`bool` *optional*, defaults to `False`): Denotes if attention_mask for input_features, beatsteps and extrapolated_beatstep will be given as output or not. Automatically set to True for batched inputs. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. If nothing is specified, it will return list of `np.ndarray` arrays. 
""" requires_backends(self, ["librosa"]) is_batched = bool(isinstance(audio, (list, tuple)) and isinstance(audio[0], (np.ndarray, tuple, list))) if is_batched: # This enables the user to process files of different sampling_rate at same time if not isinstance(sampling_rate, list): raise ValueError( "Please give sampling_rate of each audio separately when you are passing multiple raw_audios at the same time. " f"Received {sampling_rate}, expected [audio_1_sr, ..., audio_n_sr]." ) return_attention_mask = True if return_attention_mask is None else return_attention_mask else: audio = [audio] sampling_rate = [sampling_rate] return_attention_mask = False if return_attention_mask is None else return_attention_mask batch_input_features, batch_beatsteps, batch_ext_beatstep = [], [], [] for single_raw_audio, single_sampling_rate in zip(audio, sampling_rate): bpm, beat_times, confidence, estimates, essentia_beat_intervals = self.extract_rhythm( audio=single_raw_audio ) beatsteps = self.interpolate_beat_times(beat_times=beat_times, steps_per_beat=steps_per_beat, n_extend=1) if self.sampling_rate != single_sampling_rate and self.sampling_rate is not None: if resample: # Change sampling_rate to self.sampling_rate single_raw_audio = librosa.core.resample( single_raw_audio, orig_sr=single_sampling_rate, target_sr=self.sampling_rate, res_type="kaiser_best", ) else: warnings.warn( f"The sampling_rate of the provided audio is different from the target sampling_rate " f"of the Feature Extractor, {self.sampling_rate} vs {single_sampling_rate}. " f"In these cases it is recommended to use `resample=True` in the `__call__` method to " f"get the optimal behaviour." ) single_sampling_rate = self.sampling_rate start_sample = int(beatsteps[0] * single_sampling_rate) end_sample = int(beatsteps[-1] * single_sampling_rate) input_features, extrapolated_beatstep = self.preprocess_mel( single_raw_audio[start_sample:end_sample], beatsteps - beatsteps[0] ) mel_specs = self.mel_spectrogram(input_features.astype(np.float32)) # apply np.log to get log mel-spectrograms log_mel_specs = np.log(np.clip(mel_specs, a_min=1e-6, a_max=None)) input_features = np.transpose(log_mel_specs, (0, -1, -2)) batch_input_features.append(input_features) batch_beatsteps.append(beatsteps) batch_ext_beatstep.append(extrapolated_beatstep) output = BatchFeature( { "input_features": batch_input_features, "beatsteps": batch_beatsteps, "extrapolated_beatstep": batch_ext_beatstep, } ) output = self.pad( output, is_batched=is_batched, return_attention_mask=return_attention_mask, return_tensors=return_tensors, ) return output
transformers/src/transformers/models/pop2piano/feature_extraction_pop2piano.py
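# Illustrative sketch (not part of the original file): the log-mel spectrogram step of the
# feature extractor above, applied to a dummy one-second waveform. The np.log(np.clip(...))
# post-processing mirrors what `__call__` does; the random waveform and the printed shape
# are purely illustrative.
import numpy as np

from transformers.models.pop2piano.feature_extraction_pop2piano import Pop2PianoFeatureExtractor

feature_extractor = Pop2PianoFeatureExtractor()  # defaults: 22050 Hz, window 4096, hop 1024, 512 mel bins

dummy_audio = np.random.randn(1, feature_extractor.sampling_rate).astype(np.float32)  # (batch, samples)
mel_specs = feature_extractor.mel_spectrogram(dummy_audio)
log_mel_specs = np.log(np.clip(mel_specs, a_min=1e-6, a_max=None))
print(log_mel_specs.shape)  # (1, num_mel_bins, num_frames)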
{ "file_path": "transformers/src/transformers/models/pop2piano/feature_extraction_pop2piano.py", "repo_id": "transformers", "token_count": 8827 }
355
# coding=utf-8 # Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ RemBERT model configuration""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/config.json", # See all RemBERT models at https://huggingface.co/models?filter=rembert } class RemBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RemBertModel`]. It is used to instantiate an RemBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RemBERT [google/rembert](https://huggingface.co/google/rembert) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 250300): Vocabulary size of the RemBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RemBertModel`] or [`TFRemBertModel`]. Vocabulary size of the model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward method of [`RemBertModel`]. hidden_size (`int`, *optional*, defaults to 1152): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 18): Number of attention heads for each attention layer in the Transformer encoder. input_embedding_size (`int`, *optional*, defaults to 256): Dimensionality of the input embeddings. output_embedding_size (`int`, *optional*, defaults to 1664): Dimensionality of the output embeddings. intermediate_size (`int`, *optional*, defaults to 4608): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0): The dropout ratio for the attention probabilities. classifier_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the classifier layer when fine-tuning. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`RemBertModel`] or [`TFRemBertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. Example: ```python >>> from transformers import RemBertModel, RemBertConfig >>> # Initializing a RemBERT rembert style configuration >>> configuration = RemBertConfig() >>> # Initializing a model from the rembert style configuration >>> model = RemBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "rembert" def __init__( self, vocab_size=250300, hidden_size=1152, num_hidden_layers=32, num_attention_heads=18, input_embedding_size=256, output_embedding_size=1664, intermediate_size=4608, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=312, eos_token_id=313, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.input_embedding_size = input_embedding_size self.output_embedding_size = output_embedding_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.classifier_dropout_prob = classifier_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.tie_word_embeddings = False class RemBertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] ) @property def atol_for_validation(self) -> float: return 1e-4
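# Illustrative sketch (not part of the original file): RemBERT decouples the input/output
# embedding sizes from the hidden size, and the ONNX config above declares which input axes
# are dynamic for export. The snippet just instantiates the classes defined in this file with
# their defaults; nothing here is authoritative beyond those defaults.
from transformers.models.rembert.configuration_rembert import RemBertConfig, RemBertOnnxConfig

config = RemBertConfig()
print(config.input_embedding_size, config.hidden_size, config.output_embedding_size)  # 256 1152 1664

onnx_config = RemBertOnnxConfig(config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'}),
#              ('token_type_ids', {0: 'batch', 1: 'sequence'})])
print(onnx_config.atol_for_validation)  # 0.0001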
transformers/src/transformers/models/rembert/configuration_rembert.py/0
{ "file_path": "transformers/src/transformers/models/rembert/configuration_rembert.py", "repo_id": "transformers", "token_count": 2855 }
356
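To make the RemBERT configuration record above concrete, a reduced-size model can be built straight from a modified configuration. This is a minimal sketch with arbitrary toy hyperparameters (illustration values only, not anything released under `google/rembert`), and it assumes PyTorch is installed:

```python
import torch
from transformers import RemBertConfig, RemBertModel

# Toy sizes so the model builds instantly; the head count must divide hidden_size.
tiny_config = RemBertConfig(
    vocab_size=1000,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    input_embedding_size=32,
    output_embedding_size=96,
    intermediate_size=128,
    max_position_embeddings=64,
)
model = RemBertModel(tiny_config)

input_ids = torch.randint(0, tiny_config.vocab_size, (1, 10))
with torch.no_grad():
    outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 10, 64])
```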
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch RoBERTa model.""" import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_roberta import RobertaConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "roberta-base" _CONFIG_FOR_DOC = "RobertaConfig" ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [ "roberta-base", "roberta-large", "roberta-large-mnli", "distilroberta-base", "roberta-base-openai-detector", "roberta-large-openai-detector", # See all RoBERTa models at https://huggingface.co/models?filter=roberta ] class RobertaEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
""" # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta class RobertaSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class RobertaSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta class RobertaAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = RobertaSelfAttention(config, position_embedding_type=position_embedding_type) self.output = RobertaSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class RobertaIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def 
forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class RobertaOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta class RobertaLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = RobertaAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = RobertaAttention(config, position_embedding_type="absolute") self.intermediate = RobertaIntermediate(config) self.output = RobertaOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( 
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta class RobertaEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPooler class RobertaPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model 
by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class RobertaPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RobertaConfig base_model_prefix = "roberta" supports_gradient_checkpointing = True _no_split_modules = ["RobertaEmbeddings", "RobertaSelfAttention"] # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) ROBERTA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RobertaConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ ROBERTA_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. 
[What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.", ROBERTA_START_DOCSTRING, ) class RobertaModel(RobertaPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in *Attention is all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762 """ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = RobertaEmbeddings(config) self.encoder = RobertaEncoder(config) self.pooler = RobertaPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.bert.modeling_bert.BertModel.forward def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: 
return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings( """RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.""", ROBERTA_START_DOCSTRING ) class RobertaForCausalLM(RobertaPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`") self.roberta = RobertaModel(config, add_pooling_layer=False) self.lm_head = RobertaLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, past_key_values: Tuple[Tuple[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Returns: Example: ```python >>> from transformers import AutoTokenizer, RobertaForCausalLM, AutoConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("roberta-base") >>> config = AutoConfig.from_pretrained("roberta-base") >>> config.is_decoder = True >>> model = RobertaForCausalLM.from_pretrained("roberta-base", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) lm_loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(prediction_scores.device) # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past @add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", 
ROBERTA_START_DOCSTRING) class RobertaForMaskedLM(RobertaPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.roberta = RobertaModel(config, add_pooling_layer=False) self.lm_head = RobertaLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="<mask>", expected_output="' Paris'", expected_loss=0.1, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` kwargs (`Dict[str, any]`, optional, defaults to *{}*): Used to hide legacy arguments that have been deprecated. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(prediction_scores.device) loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class RobertaLMHead(nn.Module): """Roberta Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) # For accelerate compatibility and to not break backward compatibility if self.decoder.bias.device.type == "meta": self.decoder.bias = self.bias else: self.bias = self.decoder.bias @add_start_docstrings( """ RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ROBERTA_START_DOCSTRING, ) class RobertaForSequenceClassification(RobertaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.roberta = RobertaModel(config, add_pooling_layer=False) self.classifier = RobertaClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="cardiffnlp/twitter-roberta-base-emotion", output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'optimism'", expected_loss=0.08, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. 
Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, ROBERTA_START_DOCSTRING, ) class RobertaForMultipleChoice(RobertaPreTrainedModel): def __init__(self, config): super().__init__(config) self.roberta = RobertaModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.roberta( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(reshaped_logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, ROBERTA_START_DOCSTRING, ) class RobertaForTokenClassification(RobertaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roberta = RobertaModel(config, add_pooling_layer=False) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="Jean-Baptiste/roberta-large-ner-english", output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']", expected_loss=0.01, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, ROBERTA_START_DOCSTRING, ) class RobertaForQuestionAnswering(RobertaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roberta = RobertaModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="deepset/roberta-base-squad2", output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, expected_output="' puppet'", expected_loss=0.86, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the split adds a dimension that we squeeze away
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids (`torch.Tensor`): Tensor of token ids from which the position ids are built.
        padding_idx (`int`): The id used for padding tokens.
        past_key_values_length (`int`, *optional*, defaults to 0): Offset added to account for already-cached positions.

    Returns: torch.Tensor
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx
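
# --- Editor's note: illustrative sketch, not part of the original module. ---
# The helper above builds RoBERTa-style position ids directly from the token ids:
# padding positions are pinned to `padding_idx`, while real tokens are numbered
# consecutively starting at `padding_idx + 1`. A minimal, hedged demonstration
# (the token ids below are arbitrary placeholders; 1 is assumed to be the pad id):
if __name__ == "__main__":
    _demo_input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # two trailing pad tokens
    _demo_positions = create_position_ids_from_input_ids(_demo_input_ids, padding_idx=1)
    # Non-padding tokens get positions 2, 3, 4, 5; both pad positions keep padding_idx (1).
    print(_demo_positions)  # tensor([[2, 3, 4, 5, 1, 1]])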
transformers/src/transformers/models/roberta/modeling_roberta.py/0
{ "file_path": "transformers/src/transformers/models/roberta/modeling_roberta.py", "repo_id": "transformers", "token_count": 30190 }
357
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert RoFormer checkpoint."""

import argparse

import torch

from transformers import RoFormerConfig, RoFormerForMaskedLM, load_tf_weights_in_roformer
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model
    config = RoFormerConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = RoFormerForMaskedLM(config)

    # Load weights from the TF checkpoint
    load_tf_weights_in_roformer(model, config, tf_checkpoint_path)

    # Save the PyTorch model
    print(f"Saving PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path, _use_new_zipfile_serialization=False)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RoFormer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
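
# --- Editor's note: illustrative usage sketch, not part of the original script. ---
# With a local RoFormer TensorFlow checkpoint and its config JSON available, the
# script above would typically be invoked as below. All paths are placeholders:
#
#   python convert_roformer_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/roformer/model.ckpt \
#       --bert_config_file /path/to/roformer/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin
#
# The dumped state dict can then be loaded back by instantiating
# RoFormerForMaskedLM(RoFormerConfig.from_json_file("/path/to/roformer/config.json"))
# and calling `model.load_state_dict(torch.load("/path/to/output/pytorch_model.bin"))`.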
transformers/src/transformers/models/roformer/convert_roformer_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/roformer/convert_roformer_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 776 }
358
# coding=utf-8 # Copyright 2023 The Meta AI Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorFlow SAM model. This file was mostly generated by auto-translation from the PyTorch original. In the event of a discrepancy, the original file should be regarded as the 'reference' version. """ from __future__ import annotations import collections from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import ACT2FN from ...modeling_tf_outputs import TFBaseModelOutput from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, keras, shape_list, unpack_inputs from ...tf_utils import flatten, functional_layernorm from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "SamConfig" _CHECKPOINT_FOR_DOC = "facebook/sam-vit-huge" TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/sam-vit-huge", "facebook/sam-vit-large", "facebook/sam-vit-base", # See all SAM models at https://huggingface.co/models?filter=sam ] @dataclass class TFSamVisionEncoderOutput(ModelOutput): """ Base class for sam vision model's outputs that also contains image embeddings obtained by applying the projection layer to the pooler_output. Args: image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ image_embeds: tf.Tensor | None = None last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFSamImageSegmentationOutput(ModelOutput): """ Base class for Segment-Anything model's output Args: iou_scores (`tf.Tensor` of shape `(batch_size, num_masks)`): The iou scores of the predicted masks. 
pred_masks (`tf.Tensor` of shape `(batch_size, num_masks, height, width)`): The predicted low resolutions masks. Needs to be post-processed by the processor vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision model at the output of each layer plus the optional initial embedding outputs. vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. mask_decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ iou_scores: tf.Tensor = None pred_masks: tf.Tensor = None vision_hidden_states: Tuple[tf.Tensor, ...] | None = None vision_attentions: Tuple[tf.Tensor, ...] | None = None mask_decoder_attentions: Tuple[tf.Tensor, ...] | None = None class TFSamPatchEmbeddings(keras.layers.Layer): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config, **kwargs): super().__init__(**kwargs) image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = keras.layers.Conv2D( hidden_size, kernel_size=patch_size, strides=patch_size, name="projection" ) def call(self, pixel_values): batch_size, num_channels, height, width = shape_list(pixel_values) if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." 
) embeddings = self.projection(tf.transpose(pixel_values, perm=[0, 2, 3, 1])) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, None, self.num_channels]) class TFSamMLPBlock(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.lin1 = keras.layers.Dense(config.mlp_dim, name="lin1") self.lin2 = keras.layers.Dense(config.hidden_size, name="lin2") self.act = ACT2FN[config.hidden_act] self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.lin1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.lin2(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "lin1", None) is not None: with tf.name_scope(self.lin1.name): self.lin1.build([None, None, self.config.hidden_size]) if getattr(self, "lin2", None) is not None: with tf.name_scope(self.lin2.name): self.lin2.build([None, None, self.config.mlp_dim]) class TFSamLayerNorm(keras.layers.Layer): r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). """ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last", **kwargs): super().__init__(**kwargs) self.eps = eps self.data_format = data_format self.normalized_shape = normalized_shape if self.data_format not in ["channels_last", "channels_first"]: raise NotImplementedError(f"Unsupported data format: {self.data_format}") def build(self, input_shape): self.weight = self.add_weight(shape=self.normalized_shape, initializer="ones", name="weight") self.bias = self.add_weight(shape=self.normalized_shape, initializer="zeros", name="bias") super().build(input_shape) def call(self, x: tf.Tensor) -> tf.Tensor: if self.data_format == "channels_last": x = functional_layernorm(x, weight=self.weight, bias=self.bias, epsilon=self.eps, axis=-1) elif self.data_format == "channels_first": x = functional_layernorm(x, weight=self.weight, bias=self.bias, epsilon=self.eps, axis=1) return x class TFSamAttention(keras.layers.Layer): """ SAM's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and values. 
""" def __init__(self, config, downsample_rate=None, **kwargs): super().__init__(**kwargs) self.hidden_size = config.hidden_size downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate self.internal_dim = config.hidden_size // downsample_rate self.num_attention_heads = config.num_attention_heads if self.internal_dim % config.num_attention_heads != 0: raise ValueError("num_attention_heads must divide hidden_size.") self.q_proj = keras.layers.Dense(self.internal_dim, name="q_proj") self.k_proj = keras.layers.Dense(self.internal_dim, name="k_proj") self.v_proj = keras.layers.Dense(self.internal_dim, name="v_proj") self.out_proj = keras.layers.Dense(self.hidden_size, name="out_proj") def _separate_heads(self, hidden_states: tf.Tensor, num_attention_heads: int) -> tf.Tensor: batch, point_batch_size, n_tokens, channel = shape_list(hidden_states) c_per_head = channel // num_attention_heads hidden_states = tf.reshape( hidden_states, (batch * point_batch_size, n_tokens, num_attention_heads, c_per_head) ) return tf.transpose(hidden_states, perm=[0, 2, 1, 3]) def _recombine_heads(self, hidden_states: tf.Tensor, point_batch_size: int) -> tf.Tensor: batch, n_heads, n_tokens, c_per_head = shape_list(hidden_states) hidden_states = tf.transpose(hidden_states, perm=[0, 2, 1, 3]) return tf.reshape( hidden_states, (batch // tf.reduce_max([1, point_batch_size]), point_batch_size, n_tokens, n_heads * c_per_head), ) def call(self, query: tf.Tensor, key: tf.Tensor, value: tf.Tensor) -> tf.Tensor: # Input projections query = self.q_proj(query) key = self.k_proj(key) value = self.v_proj(value) point_batch_size = shape_list(query)[1] # Separate into heads query = self._separate_heads(query, self.num_attention_heads) key = self._separate_heads(key, self.num_attention_heads) value = self._separate_heads(value, self.num_attention_heads) # SamAttention _, _, _, c_per_head = shape_list(query) attn = tf.matmul( query, tf.transpose(key, perm=[0, 1, 3, 2]) ) # batch_size * point_batch_size x N_heads x N_tokens x N_tokens attn = attn / tf.math.sqrt(float(c_per_head)) attn = tf.nn.softmax(attn, axis=-1) # Get output out = tf.matmul(attn, value) out = self._recombine_heads(out, point_batch_size) out = self.out_proj(out) return out def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.hidden_size]) if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.hidden_size]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.hidden_size]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.internal_dim]) class TFSamTwoWayAttentionBlock(keras.layers.Layer): def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False, **kwargs): """ A transformer block with four layers: (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on sparse inputs (4) cross attention of dense inputs -> sparse inputs Arguments: config (`SamMaskDecoderConfig`): The configuration file used to instantiate the block attention_downsample_rate (*optionalk*, int, defaults to 2): The downsample ratio of the block used to reduce the inner dim of the attention. 
skip_first_layer_pe (*optional*, bool, defaults to `False`): Whether or not to skip the addition of the query_point_embedding on the first layer. """ super().__init__(**kwargs) self.hidden_size = config.hidden_size self.layer_norm_eps = config.layer_norm_eps self.self_attn = TFSamAttention(config, downsample_rate=1, name="self_attn") self.layer_norm1 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm1") self.cross_attn_token_to_image = TFSamAttention( config, downsample_rate=attention_downsample_rate, name="cross_attn_token_to_image" ) self.layer_norm2 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm2") self.mlp = TFSamMLPBlock(config, name="mlp") self.layer_norm3 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm3") self.layer_norm4 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm4") self.cross_attn_image_to_token = TFSamAttention( config, downsample_rate=attention_downsample_rate, name="cross_attn_image_to_token" ) self.skip_first_layer_pe = skip_first_layer_pe def call( self, queries: tf.Tensor, keys: tf.Tensor, query_point_embedding: tf.Tensor, key_point_embedding: tf.Tensor, output_attentions: bool = False, ): # Self attention block if self.skip_first_layer_pe: queries = self.self_attn(query=queries, key=queries, value=queries) else: query = queries + query_point_embedding attn_out = self.self_attn(query=query, key=query, value=queries) queries = queries + attn_out queries = self.layer_norm1(queries) # Cross attention block, tokens attending to image embedding query = queries + query_point_embedding key = keys + key_point_embedding attn_out = self.cross_attn_token_to_image(query=query, key=key, value=keys) queries = queries + attn_out queries = self.layer_norm2(queries) # MLP block mlp_out = self.mlp(queries) queries = queries + mlp_out queries = self.layer_norm3(queries) # Cross attention block, image embedding attending to tokens query = queries + query_point_embedding key = keys + key_point_embedding attn_out = self.cross_attn_image_to_token(query=key, key=query, value=queries) keys = keys + attn_out keys = self.layer_norm4(keys) outputs = (queries, keys) if output_attentions: outputs = outputs + (attn_out,) else: outputs = outputs + (None,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, None, self.hidden_size]) if getattr(self, "cross_attn_token_to_image", None) is not None: with tf.name_scope(self.cross_attn_token_to_image.name): self.cross_attn_token_to_image.build(None) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, None, self.hidden_size]) if getattr(self, "mlp", None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, "layer_norm3", None) is not None: with tf.name_scope(self.layer_norm3.name): self.layer_norm3.build([None, None, None, self.hidden_size]) if getattr(self, "layer_norm4", None) is not None: with tf.name_scope(self.layer_norm4.name): self.layer_norm4.build([None, None, None, self.hidden_size]) if getattr(self, "cross_attn_image_to_token", None) is not None: with tf.name_scope(self.cross_attn_image_to_token.name): 
self.cross_attn_image_to_token.build(None) class TFSamTwoWayTransformer(keras.layers.Layer): def __init__(self, config: SamMaskDecoderConfig, **kwargs): super().__init__(**kwargs) self.config = config self.num_hidden_layers = config.num_hidden_layers self.layers = [] for i in range(self.num_hidden_layers): self.layers.append(TFSamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0), name=f"layers_._{i}")) self.final_attn_token_to_image = TFSamAttention(config, name="final_attn_token_to_image") self.layer_norm_final_attn = keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="layer_norm_final_attn" ) def call( self, point_embeddings: tf.Tensor, image_embeddings: tf.Tensor, image_positional_embeddings: tf.Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TFBaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict all_attentions = () if image_embeddings is None: raise ValueError("You have to specify an image_embedding") image_embeddings = tf.transpose(flatten(image_embeddings, 2), perm=(0, 2, 1))[:, None] image_positional_embeddings = tf.transpose(flatten(image_positional_embeddings, 2), (0, 2, 1))[:, None] # Prepare queries queries = point_embeddings keys = image_embeddings # Apply transformer blocks and final layernorm for layer in self.layers: queries, keys, attention_outputs = layer( queries=queries, keys=keys, query_point_embedding=point_embeddings, key_point_embedding=image_positional_embeddings, output_attentions=output_attentions, ) if output_attentions: all_attentions = all_attentions + (attention_outputs,) # Apply the final attenion layer from the points to the image query = queries + point_embeddings key = keys + image_positional_embeddings attn_out = self.final_attn_token_to_image(query=query, key=key, value=keys) queries = queries + attn_out queries = self.layer_norm_final_attn(queries) return queries, keys, all_attentions def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "final_attn_token_to_image", None) is not None: with tf.name_scope(self.final_attn_token_to_image.name): self.final_attn_token_to_image.build(None) if getattr(self, "layer_norm_final_attn", None) is not None: with tf.name_scope(self.layer_norm_final_attn.name): self.layer_norm_final_attn.build([None, None, None, self.config.hidden_size]) for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFSamFeedForward(keras.layers.Layer): def __init__( self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool = False, **kwargs ): super().__init__(**kwargs) self.num_layers = num_layers self.activation = keras.layers.ReLU() self.proj_in = keras.layers.Dense(hidden_dim, input_shape=(input_dim,), name="proj_in") self.proj_out = keras.layers.Dense(output_dim, input_shape=(hidden_dim,), name="proj_out") self.layers = [ keras.layers.Dense(hidden_dim, input_shape=(hidden_dim,), name=f"layers_._{i}") for i in range(num_layers - 2) ] self.sigmoid_output = sigmoid_output self.hidden_dim = hidden_dim self.input_dim = input_dim def call(self, hidden_states): hidden_states = self.proj_in(hidden_states) hidden_states = 
self.activation(hidden_states) for layer in self.layers: hidden_states = self.activation(layer(hidden_states)) hidden_states = self.proj_out(hidden_states) if self.sigmoid_output: hidden_states = tf.sigmoid(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "proj_in", None) is not None: with tf.name_scope(self.proj_in.name): self.proj_in.build([None, None, self.input_dim]) if getattr(self, "proj_out", None) is not None: with tf.name_scope(self.proj_out.name): self.proj_out.build([None, None, self.hidden_dim]) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build([None, None, self.hidden_dim]) class TFSamMaskDecoder(keras.layers.Layer): def __init__(self, config: SamMaskDecoderConfig, **kwargs): super().__init__(**kwargs) self.hidden_size = config.hidden_size self.num_multimask_outputs = config.num_multimask_outputs self.num_mask_tokens = config.num_multimask_outputs + 1 self.transformer = TFSamTwoWayTransformer(config, name="transformer") self.upscale_conv1 = keras.layers.Conv2DTranspose( self.hidden_size // 4, kernel_size=2, strides=2, name="upscale_conv1", data_format="channels_first" ) self.upscale_conv2 = keras.layers.Conv2DTranspose( self.hidden_size // 8, kernel_size=2, strides=2, name="upscale_conv2", data_format="channels_first" ) self.upscale_layer_norm = TFSamLayerNorm( self.hidden_size // 4, data_format="channels_first", name="upscale_layer_norm" ) self.activation = tf.nn.gelu mlps_list = [] for i in range(self.num_mask_tokens): mlps_list += [ TFSamFeedForward( self.hidden_size, self.hidden_size, self.hidden_size // 8, 3, name=f"output_hypernetworks_mlps_._{i}", ) ] self.output_hypernetworks_mlps = mlps_list self.iou_prediction_head = TFSamFeedForward( self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth, name="iou_prediction_head", ) def build(self, input_shape=None): if self.built: return self.built = True self.iou_token = self.add_weight(shape=(1, self.hidden_size), name="iou_token.weight", trainable=True) self.mask_tokens = self.add_weight( shape=(self.num_mask_tokens, self.hidden_size), name="mask_tokens.weight", trainable=True ) if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, "upscale_conv1", None) is not None: with tf.name_scope(self.upscale_conv1.name): self.upscale_conv1.build([None, self.hidden_size, None, None]) if getattr(self, "upscale_conv2", None) is not None: with tf.name_scope(self.upscale_conv2.name): self.upscale_conv2.build([None, self.hidden_size // 4, None, None]) if getattr(self, "upscale_layer_norm", None) is not None: with tf.name_scope(self.upscale_layer_norm.name): self.upscale_layer_norm.build(None) if getattr(self, "iou_prediction_head", None) is not None: with tf.name_scope(self.iou_prediction_head.name): self.iou_prediction_head.build(None) for mlp in self.output_hypernetworks_mlps: with tf.name_scope(mlp.name): mlp.build(None) def call( self, image_embeddings: tf.Tensor, image_positional_embeddings: tf.Tensor, sparse_prompt_embeddings: tf.Tensor, dense_prompt_embeddings: tf.Tensor, multimask_output: bool, output_attentions: Optional[bool] = None, ) -> Tuple[tf.Tensor, tf.Tensor]: batch_size, num_channels, height, width = shape_list(image_embeddings) point_batch_size = tf.math.maximum(1, tf.shape(sparse_prompt_embeddings)[1]) output_tokens = tf.concat([self.iou_token, 
self.mask_tokens], axis=0) # Should be (1, 32) + (4, 32) = (5, 32) output_tokens = tf.tile( output_tokens[None, None, :], [batch_size, point_batch_size, 1, 1] ) # Should be (batch_size, point_size, 5, 32) # Matt: The original Torch code checked that the sum of sparse_prompt_embeddings equalled 0. However, this only # happens when the sparse prompt embeddings are an empty tensor with shape[1] == 0. I replaced # it with an explicit shape check to avoid data-dependent control flow which breaks XLA. if shape_list(sparse_prompt_embeddings)[1] != 0: tokens = tf.concat((output_tokens, sparse_prompt_embeddings), axis=2) else: tokens = output_tokens point_embeddings = tf.cast(tokens, self.iou_token.dtype) image_embeddings = image_embeddings + dense_prompt_embeddings image_embeddings = tf.repeat(image_embeddings, point_batch_size, axis=0) image_positional_embeddings = tf.repeat(image_positional_embeddings, point_batch_size, axis=0) point_embedding, image_embeddings, attentions = self.transformer( point_embeddings=point_embeddings, image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, output_attentions=output_attentions, ) iou_token_out = point_embedding[:, :, 0, :] mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :] image_embeddings = tf.transpose(image_embeddings, perm=(0, 1, 3, 2)) image_embeddings = tf.reshape(image_embeddings, [batch_size * point_batch_size, num_channels, height, width]) upscaled_embedding = self.upscale_conv1(image_embeddings) upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding)) upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding)) hyper_in_list = [] for i in range(self.num_mask_tokens): current_mlp = self.output_hypernetworks_mlps[i] hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])] hyper_in = tf.stack(hyper_in_list, axis=2) _, num_channels, height, width = shape_list(upscaled_embedding) upscaled_embedding = tf.reshape( upscaled_embedding, [batch_size, point_batch_size, num_channels, height * width] ) masks = tf.reshape(hyper_in @ upscaled_embedding, [batch_size, point_batch_size, -1, height, width]) iou_pred = self.iou_prediction_head(iou_token_out) if multimask_output: mask_slice = slice(1, None) else: mask_slice = slice(0, 1) masks = masks[:, :, mask_slice, :, :] iou_pred = iou_pred[:, :, mask_slice] outputs = (masks, iou_pred) if output_attentions: outputs = outputs + (attentions,) else: outputs = outputs + (None,) return outputs class TFSamPositionalEmbedding(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.scale = config.hidden_size // 2 self.config = config def build(self, input_shape): # TODO Matt: What is going on here? Why is a non-trainable weight randomly initialized? self.positional_embedding = self.add_weight( name="positional_embedding", shape=(2, self.config.num_pos_feats), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=self.scale), trainable=False, ) super().build(input_shape) def call(self, input_coords, input_shape=None): """Positionally encode points that are normalized to [0,1].""" coordinates = tf.identity(input_coords) if input_shape is not None: coordinates = tf.stack( [ tf.cast(coordinates[:, :, :, 0], tf.float32) / input_shape[1], tf.cast(coordinates[:, :, :, 1], tf.float32) / input_shape[0], ], axis=-1, ) # assuming coords are in [0, 1]^2 square and have d_1 x ... 
x d_n x 2 shape coordinates = 2 * coordinates - 1 coordinates = tf.cast(coordinates, self.positional_embedding.dtype) coordinates = tf.matmul(coordinates, self.positional_embedding) coordinates = 2 * np.pi * coordinates # outputs d_1 x ... x d_n x channel shape return tf.concat([tf.sin(coordinates), tf.cos(coordinates)], axis=-1) class TFSamMaskEmbedding(keras.layers.Layer): def __init__(self, config: SamPromptEncoderConfig, **kwargs): super().__init__(**kwargs) self.mask_input_channels = config.mask_input_channels // 4 self.activation = ACT2FN[config.hidden_act] self.conv1 = keras.layers.Conv2D(self.mask_input_channels, kernel_size=2, strides=2, name="conv1") self.conv2 = keras.layers.Conv2D(config.mask_input_channels, kernel_size=2, strides=2, name="conv2") self.conv3 = keras.layers.Conv2D(config.hidden_size, kernel_size=1, name="conv3") self.layer_norm1 = TFSamLayerNorm(self.mask_input_channels, config.layer_norm_eps, name="layer_norm1") self.layer_norm2 = TFSamLayerNorm(self.mask_input_channels * 4, config.layer_norm_eps, name="layer_norm2") self.config = config def call(self, masks): masks = tf.transpose(masks, perm=(0, 2, 3, 1)) # Convert to channels-last hidden_states = self.conv1(masks) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) hidden_states = self.activation(hidden_states) dense_embeddings = self.conv3(hidden_states) dense_embeddings = tf.transpose(dense_embeddings, perm=(0, 3, 1, 2)) # Convert back to channels-first return dense_embeddings def build(self, input_shape=None): # This class needs an explicit build method because it isn't called with the standard dummy inputs if self.built: return self.built = True with tf.name_scope("conv1"): self.conv1.build([None, None, None, 1]) with tf.name_scope("conv2"): self.conv2.build([None, None, None, self.mask_input_channels]) with tf.name_scope("conv3"): self.conv3.build([None, None, None, self.mask_input_channels * 4]) with tf.name_scope("layer_norm1"): self.layer_norm1.build([None, None, None, self.mask_input_channels]) with tf.name_scope("layer_norm2"): self.layer_norm2.build([None, None, None, self.mask_input_channels * 4]) class TFSamPromptEncoder(keras.layers.Layer): def __init__(self, config: SamPromptEncoderConfig, shared_patch_embedding, **kwargs): super().__init__(**kwargs) self.shared_embedding = shared_patch_embedding self.mask_embed = TFSamMaskEmbedding(config, name="mask_embed") self.no_mask_embed = None self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size) self.input_image_size = config.image_size self.point_embed = [] self.hidden_size = config.hidden_size self.not_a_point_embed = None self.config = config def build(self, input_shape=None): self.no_mask_embed = self.add_weight( name="no_mask_embed.weight", shape=(1, self.hidden_size), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), trainable=True, ) self.point_embed = [ self.add_weight( name=f"point_embed_._{i}.weight", shape=(1, self.hidden_size), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), trainable=True, ) for i in range(self.config.num_point_embeddings) ] self.not_a_point_embed = self.add_weight( name="not_a_point_embed.weight", shape=(1, self.hidden_size), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), trainable=True, ) with tf.name_scope("mask_embed"): # We must explicitly build the mask embed because it isn't touched by the 
standard dummy inputs self.mask_embed.build( (None, self.config.mask_input_channels, self.config.image_size, self.config.image_size) ) if self.built: return self.built = True if getattr(self, "mask_embed", None) is not None: with tf.name_scope(self.mask_embed.name): self.mask_embed.build(None) def _embed_points(self, points: tf.Tensor, labels: tf.Tensor, pad: bool) -> tf.Tensor: """Embeds point prompts.""" points = points + 0.5 # Shift to center of pixel if pad: target_point_shape = (shape_list(points)[0], shape_list(points)[1], 1, shape_list(points)[-1]) target_labels_shape = (shape_list(points)[0], shape_list(points)[1], 1) padding_point = tf.zeros(target_point_shape, dtype=points.dtype) padding_label = -tf.ones(target_labels_shape, dtype=labels.dtype) points = tf.concat([points, padding_point], axis=2) labels = tf.concat([labels, padding_label], axis=2) input_shape = (self.input_image_size, self.input_image_size) point_embedding = self.shared_embedding(points, input_shape) point_embedding = tf.where(labels[..., None] == -1, self.not_a_point_embed[0], point_embedding) point_embedding = tf.where( labels[..., None] != -10, point_embedding, tf.zeros_like(point_embedding), ) point_embedding = tf.where( (labels == 0)[:, :, :, None], point_embedding + self.point_embed[0], point_embedding ) point_embedding = tf.where( (labels == 1)[:, :, :, None], point_embedding + self.point_embed[1], point_embedding ) return point_embedding def _embed_boxes(self, boxes: tf.Tensor) -> tf.Tensor: """Embeds box prompts.""" boxes = boxes + 0.5 # Shift to center of pixel batch_size, nb_boxes = shape_list(boxes)[:2] coords = tf.reshape(boxes, (batch_size, nb_boxes, 2, 2)) input_shape = (self.input_image_size, self.input_image_size) corner_embedding = self.shared_embedding(coords, input_shape) corner_embedding += tf.where( tf.range(shape_list(corner_embedding)[2])[None, None, :, None] == 0, self.point_embed[2][0], self.point_embed[3][0], ) return corner_embedding def call( self, batch_size: Optional[int], input_points: Optional[Tuple[tf.Tensor, tf.Tensor]], input_labels: tf.Tensor | None, input_boxes: tf.Tensor | None, input_masks: tf.Tensor | None, ) -> Tuple[tf.Tensor, tf.Tensor]: """ Embeds different types of prompts, returning both sparse and dense embeddings. Args: points (`tf.Tensor`, *optional*): point coordinates and labels to embed. 
boxes (`tf.Tensor`, *optional*): boxes to embed masks (`tf.Tensor`, *optional*): masks to embed """ sparse_embeddings = None if input_points is not None: batch_size, point_batch_size = shape_list(input_points)[:2] if input_labels is None: raise ValueError("If points are provided, labels must also be provided.") point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None)) sparse_embeddings = tf.zeros( (batch_size, point_batch_size, 0, self.hidden_size), dtype=point_embeddings.dtype ) sparse_embeddings = tf.concat([sparse_embeddings, point_embeddings], axis=2) if input_boxes is not None: batch_size = shape_list(input_boxes)[0] box_embeddings = self._embed_boxes(input_boxes) if sparse_embeddings is None: sparse_embeddings = box_embeddings else: sparse_embeddings = tf.concat([sparse_embeddings, box_embeddings], axis=2) if input_masks is not None: dense_embeddings = self.mask_embed(input_masks) else: dense_embeddings = self.no_mask_embed[0] dense_embeddings = tf.reshape(dense_embeddings, (1, -1, 1, 1)) dense_embeddings = tf.tile( dense_embeddings, (batch_size, 1, self.image_embedding_size[0], self.image_embedding_size[1]) ) if sparse_embeddings is None: sparse_embeddings = tf.zeros((batch_size, 0, 1, self.hidden_size), dtype=dense_embeddings.dtype) return sparse_embeddings, dense_embeddings class TFSamVisionAttention(keras.layers.Layer): """Multi-head Attention block with relative position embeddings.""" def __init__(self, config, window_size, **kwargs): super().__init__(**kwargs) input_size = ( (config.image_size // config.patch_size, config.image_size // config.patch_size) if window_size == 0 else (window_size, window_size) ) self.input_size = input_size self.num_attention_heads = config.num_attention_heads head_dim = config.hidden_size // config.num_attention_heads self.head_dim = head_dim self.scale = head_dim**-0.5 self.dropout = config.attention_dropout self.qkv = keras.layers.Dense(config.hidden_size * 3, use_bias=config.qkv_bias, name="qkv") self.proj = keras.layers.Dense(config.hidden_size, name="proj") self.use_rel_pos = config.use_rel_pos if self.use_rel_pos: if input_size is None: raise ValueError("Input size must be provided if using relative positional encoding.") self.config = config def build(self, input_shape=None): if self.input_size is not None: # initialize relative positional embeddings self.rel_pos_h = self.add_weight( shape=(2 * self.input_size[0] - 1, self.head_dim), initializer="zeros", name="rel_pos_h" ) self.rel_pos_w = self.add_weight( shape=(2 * self.input_size[1] - 1, self.head_dim), initializer="zeros", name="rel_pos_w" ) if self.built: return self.built = True if getattr(self, "qkv", None) is not None: with tf.name_scope(self.qkv.name): self.qkv.build([None, None, self.config.hidden_size]) if getattr(self, "proj", None) is not None: with tf.name_scope(self.proj.name): self.proj.build([None, None, self.config.hidden_size]) def get_rel_pos(self, q_size: int, k_size: int, rel_pos: tf.Tensor) -> tf.Tensor: """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (int): size of the query. k_size (int): size of key k. rel_pos (`tf.Tensor`): relative position embeddings (L, channel). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos if needed. if rel_pos.shape[0] != max_rel_dist: # Interpolate rel pos. 
rel_pos_resized = tf.image.resize( tf.reshape(rel_pos, (1, rel_pos.shape[0], -1)), size=(max_rel_dist, rel_pos.shape[1]), method="bilinear", ) rel_pos_resized = tf.reshape(rel_pos_resized, (-1, max_rel_dist)) else: rel_pos_resized = rel_pos # Scale the coords with short length if shapes for q and k are different. q_coords = tf.expand_dims(tf.range(q_size, dtype=tf.float32), 1) * max(k_size / q_size, 1.0) k_coords = tf.expand_dims(tf.range(k_size, dtype=tf.float32), 0) * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return tf.gather(rel_pos_resized, tf.cast(relative_coords, tf.int32)) def add_decomposed_rel_pos( self, attn: tf.Tensor, query: tf.Tensor, rel_pos_h: tf.Tensor, rel_pos_w: tf.Tensor, q_size: Tuple[int, int], k_size: Tuple[int, int], ) -> tf.Tensor: """ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py Args: attn (`tf.Tensor`): attention map. query (`tf.Tensor`): query q in the attention layer with shape (batch_size, query_height * query_width, channel). rel_pos_h (`tf.Tensor`): relative position embeddings (Lh, channel) for height axis. rel_pos_w (`tf.Tensor`): relative position embeddings (Lw, channel) for width axis. q_size (tuple): spatial sequence size of query q with (query_height, query_width). k_size (tuple): spatial sequence size of key k with (key_height, key_width). Returns: attn (`tf.Tensor`): attention map with added relative positional embeddings. """ query_height, query_width = q_size key_height, key_width = k_size relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h) relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w) batch_size, _, dim = shape_list(query) reshaped_query = tf.reshape(query, (batch_size, query_height, query_width, dim)) rel_h = tf.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height) rel_w = tf.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width) attn = tf.reshape(attn, (batch_size, query_height, query_width, key_height, key_width)) attn = attn + tf.expand_dims(rel_h, axis=-1) + tf.expand_dims(rel_w, axis=-2) attn = tf.reshape(attn, (batch_size, query_height * query_width, key_height * key_width)) return attn def call(self, hidden_states: tf.Tensor, output_attentions=False, training=False) -> tf.Tensor: batch_size, height, width, _ = shape_list(hidden_states) # qkv with shape (3, batch_size, nHead, height * width, channel) qkv = tf.reshape(self.qkv(hidden_states), (batch_size, height * width, 3, self.num_attention_heads, -1)) qkv = tf.transpose(qkv, perm=(2, 0, 3, 1, 4)) # q, k, v with shape (batch_size * nHead, height * width, channel) query, key, value = tf.unstack( tf.reshape(qkv, (3, batch_size * self.num_attention_heads, height * width, -1)), axis=0 ) attn_weights = tf.matmul(query * self.scale, key, transpose_b=True) if self.use_rel_pos: attn_weights = self.add_decomposed_rel_pos( attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) ) attn_weights = tf.nn.softmax(attn_weights, axis=-1) if training: attn_probs = tf.nn.dropout(attn_weights, rate=self.dropout) else: attn_probs = attn_weights attn_output = tf.reshape(attn_probs @ value, (batch_size, self.num_attention_heads, height, width, -1)) attn_output = tf.transpose(attn_output, perm=(0, 2, 3, 1, 4)) attn_output = tf.reshape(attn_output, (batch_size, height, width, 
self.config.hidden_size)) attn_output = self.proj(attn_output) if output_attentions: outputs = (attn_output, attn_weights) else: outputs = (attn_output, None) return outputs class TFSamVisionLayer(keras.layers.Layer): def __init__(self, config, window_size, **kwargs): super().__init__(**kwargs) self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1") self.attn = TFSamVisionAttention(config, window_size, name="attn") self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2") self.mlp = TFSamMLPBlock(config, name="mlp") self.window_size = window_size self.config = config def window_partition(self, hidden_states: tf.Tensor, window_size: int) -> Tuple[tf.Tensor, Tuple[int, int]]: batch_size, height, width, channel = shape_list(hidden_states) pad_h = (window_size - height % window_size) % window_size pad_w = (window_size - width % window_size) % window_size if pad_h > 0 or pad_w > 0: hidden_states = tf.pad(hidden_states, [[0, 0], [0, pad_h], [0, pad_w], [0, 0]]) pad_height, pad_width = height + pad_h, width + pad_w hidden_states = tf.reshape( hidden_states, [batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel], ) windows = tf.reshape( tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [-1, window_size, window_size, channel] ) return windows, (pad_height, pad_width) def window_unpartition( self, windows: tf.Tensor, window_size: int, padding_shape: Tuple[int, int], original_shape: Tuple[int, int] ) -> tf.Tensor: pad_height, pad_width = padding_shape height, width = original_shape batch_size = shape_list(windows)[0] // (pad_height * pad_width // window_size // window_size) hidden_states = tf.reshape( windows, [batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1] ) hidden_states = tf.reshape( tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [batch_size, pad_height, pad_width, -1] ) if pad_height > height or pad_width > width: hidden_states = hidden_states[:, :height, :width, :] return hidden_states def call( self, hidden_states: tf.Tensor, output_attentions: Optional[bool] = False, training: Optional[bool] = False, ) -> Tuple[tf.Tensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) if self.window_size > 0: height, width = hidden_states.shape[1], hidden_states.shape[2] hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size) hidden_states, attn_weights = self.attn( hidden_states=hidden_states, output_attentions=output_attentions, training=training, ) if self.window_size > 0: hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width)) hidden_states = residual + hidden_states layernorm_output = self.layer_norm2(hidden_states) hidden_states = hidden_states + self.mlp(layernorm_output) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, None, self.config.hidden_size]) if getattr(self, "attn", None) is not None: with tf.name_scope(self.attn.name): self.attn.build(None) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, None, self.config.hidden_size]) if getattr(self, "mlp", None) is not None: with 
tf.name_scope(self.mlp.name): self.mlp.build(None) class TFSamVisionNeck(keras.layers.Layer): def __init__(self, config: SamVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.conv1 = keras.layers.Conv2D( config.output_channels, kernel_size=1, use_bias=False, name="conv1", ) self.layer_norm1 = TFSamLayerNorm(config.output_channels, name="layer_norm1") self.conv2 = keras.layers.Conv2D( config.output_channels, kernel_size=3, padding="same", use_bias=False, name="conv2", ) self.layer_norm2 = TFSamLayerNorm(config.output_channels, name="layer_norm2") def call(self, hidden_states): hidden_states = self.conv1(hidden_states) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) hidden_states = tf.transpose(hidden_states, perm=[0, 3, 1, 2]) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv1", None) is not None: with tf.name_scope(self.conv1.name): self.conv1.build([None, None, None, self.config.hidden_size]) if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build(None) if getattr(self, "conv2", None) is not None: with tf.name_scope(self.conv2.name): self.conv2.build([None, None, None, self.config.output_channels]) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build(None) class TFSamVisionEncoder(keras.layers.Layer): def __init__(self, config: SamVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.image_size = config.image_size self.patch_embed = TFSamPatchEmbeddings(config, name="patch_embed") self.pos_embed = None self.layers = [] for i in range(config.num_hidden_layers): layer = TFSamVisionLayer( config, window_size=config.window_size if i not in config.global_attn_indexes else 0, name=f"layers_._{i}", ) self.layers.append(layer) self.neck = TFSamVisionNeck(config, name="neck") def build(self, input_shape=None): if self.built: return self.built = True if self.config.use_abs_pos: # Initialize absolute positional embedding with pretrain image size. 
self.pos_embed = self.add_weight( shape=[ 1, self.config.image_size // self.config.patch_size, self.config.image_size // self.config.patch_size, self.config.hidden_size, ], initializer="zeros", trainable=True, name="pos_embed", ) if getattr(self, "patch_embed", None) is not None: with tf.name_scope(self.patch_embed.name): self.patch_embed.build(None) if getattr(self, "neck", None) is not None: with tf.name_scope(self.neck.name): self.neck.build(None) for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) def get_input_embeddings(self): return self.patch_embed def call( self, pixel_values: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFSamVisionEncoderOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.patch_embed(pixel_values) if self.pos_embed is not None: hidden_states = hidden_states + self.pos_embed all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, output_attentions=output_attentions, training=training) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = self.neck(hidden_states) if not return_dict: outputs = (hidden_states,) if output_hidden_states: outputs = outputs + (all_hidden_states,) if output_attentions: outputs = outputs + (all_self_attentions,) return outputs return TFSamVisionEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class TFSamPreTrainedModel(TFPreTrainedModel): config_class = SamConfig base_model_prefix = "sam" main_input_name = "pixel_values" SAM_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a TensorFlow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TensorFlow Model and refer to the TensorFlow documentation for all matter related to general usage and behavior. Parameters: config ([`SamConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ SAM_INPUTS_DOCSTRING = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`SamProcessor`]. See [`SamProcessor.__call__`] for details. 
input_points (`tf.Tensor` of shape `(batch_size, num_points, 2)`): Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much better results. The points can be obtained by passing a list of list of list to the processor that will create corresponding `tf` tensors of dimension 4. The first dimension is the image batch size, the second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict per input point), the third dimension is the number of points per segmentation mask (it is possible to pass multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal) coordinates of the point. If a different number of points is passed either for each image, or for each mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the computation of the embedding will be skipped for these points using the labels. input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points)`): Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the official implementation, there are 3 types of labels - `1`: the point is a point that contains the object of interest - `0`: the point is a point that does not contain the object of interest - `-1`: the point corresponds to the background We added the label: - `-10`: the point is a padding point, thus should be ignored by the prompt encoder The padding labels should be automatically done by the processor. input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes, 4)`): Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields to much better generated masks. The boxes can be obtained by passing a list of list of list to the processor, that will generate a `tf` tensor, with each dimension corresponding respectively to the image batch size, the number of boxes per image and the coordinates of the top left and botton right point of the box. In the order (`x1`, `y1`, `x2`, `y2`): - `x1`: the x coordinate of the top left point of the input box - `y1`: the y coordinate of the top left point of the input box - `x2`: the x coordinate of the bottom right point of the input box - `y2`: the y coordinate of the bottom right point of the input box input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`): SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to generate a corresponding embedding, that will be fed later on to the mask decoder. These masks needs to be manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`). image_embeddings (`tf.Tensor` of shape `(batch_size, output_channels, window_size, window_size)`): Image embeddings, this is used by the mask decder to generate masks and iou scores. For more memory efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings` method, and then feed them to the `call` method instead of feeding the `pixel_values`. multimask_output (`bool`, *optional*): In the original implementation and paper, the model always outputs 3 masks per image (or per point / per bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the "best" mask, by specifying `multimask_output=False`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "Segment Anything Model (SAM) for generating segmentation masks, given an input image and ", " optional 2D location and bounding boxes.", SAM_START_DOCSTRING, ) class TFSamModel(TFSamPreTrainedModel): _keys_to_ignore_on_load_missing = [r"prompt_encoder.shared_embedding.positional_embedding"] def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.shared_image_embedding = TFSamPositionalEmbedding(config.vision_config, name="shared_image_embedding") self.vision_encoder = TFSamVisionEncoder(config.vision_config, name="vision_encoder") self.prompt_encoder = TFSamPromptEncoder( config.prompt_encoder_config, self.shared_image_embedding, name="prompt_encoder" ) self.mask_decoder = TFSamMaskDecoder(config.mask_decoder_config, name="mask_decoder") self.config = config def get_input_embeddings(self): return self.vision_encoder.get_input_embeddings() def get_image_wide_positional_embeddings(self): size = self.config.prompt_encoder_config.image_embedding_size grid = tf.ones((size, size)) y_embed = tf.math.cumsum(grid, axis=0) - 0.5 x_embed = tf.math.cumsum(grid, axis=1) - 0.5 y_embed = y_embed / size x_embed = x_embed / size positional_embedding = self.shared_image_embedding(tf.stack([x_embed, y_embed], axis=-1)) return tf.expand_dims(tf.transpose(positional_embedding, perm=[2, 0, 1]), axis=0) # channel x height x width def get_image_embeddings( self, pixel_values, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Returns the image embeddings by passing the pixel values through the vision encoder. Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Input pixel values output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.TFModelOutput`] instead of a plain tuple. """ vision_output = self.vision_encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeddings = vision_output[0] return image_embeddings def get_prompt_embeddings( self, input_points: tf.Tensor | None = None, input_labels: tf.Tensor | None = None, input_boxes: tf.Tensor | None = None, input_masks: tf.Tensor | None = None, ): r""" Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder. Args: input_points (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`): Optional input points for the prompt encoder. The padding of the point is automatically done by the processor. `point_batch_size` refers to the number of masks that we want the model to predict per point. The model will output `point_batch_size` times 3 masks in total. input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image)`): Optional input labels for the prompt encoder. The padding of the labels is automatically done by the processor, or can be fed by the user. 
input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes_per_image, 4)`): Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the processor. users can also pass manually the input boxes. input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`): Optional input masks for the prompt encoder. """ prompt_output = self.prompt_encoder( input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) return prompt_output @unpack_inputs @add_start_docstrings_to_model_forward(SAM_INPUTS_DOCSTRING) def call( self, pixel_values: TFModelInputType | None = None, input_points: tf.Tensor | None = None, input_labels: tf.Tensor | None = None, input_boxes: tf.Tensor | None = None, input_masks: tf.Tensor | None = None, image_embeddings: tf.Tensor | None = None, multimask_output: bool = True, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, **kwargs, ) -> TFSamImageSegmentationOutput | Tuple[tf.Tensor]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None and image_embeddings is None: raise ValueError("Either pixel_values or image_embeddings must be provided.") if pixel_values is not None and image_embeddings is not None: raise ValueError("Only one of pixel_values and image_embeddings can be provided.") if input_points is not None and len(input_points.shape) != 4: raise ValueError( "The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.", " got {}.".format(input_points.shape), ) if input_boxes is not None and len(input_boxes.shape) != 3: raise ValueError( "The input_points must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`.", " got {}.".format(input_boxes.shape), ) if input_points is not None and input_boxes is not None: point_batch_size = shape_list(input_points)[1] box_batch_size = shape_list(input_boxes)[1] if point_batch_size != box_batch_size: raise ValueError( "You should provide as many bounding boxes as input points per box. 
Got {} and {}.".format( point_batch_size, box_batch_size ) ) if pixel_values is not None: # Ensures that later checks pass even with an all-None shape from the serving signature pixel_values = tf.ensure_shape( pixel_values, [ None, self.config.vision_config.num_channels, self.config.vision_config.image_size, self.config.vision_config.image_size, ], ) image_positional_embeddings = self.get_image_wide_positional_embeddings() # repeat with batch size batch_size = shape_list(pixel_values)[0] if pixel_values is not None else shape_list(image_embeddings)[0] image_positional_embeddings = tf.repeat(image_positional_embeddings, batch_size, axis=0) vision_attentions = None vision_hidden_states = None if pixel_values is not None: vision_outputs = self.vision_encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, training=training, ) image_embeddings = vision_outputs["last_hidden_state"] if output_hidden_states: vision_hidden_states = vision_outputs["hidden_states"] if output_attentions: vision_attentions = vision_outputs["attentions"] if input_points is not None and input_labels is None: input_labels = tf.ones_like(input_points[:, :, :, 0], dtype=tf.int32) if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]: raise ValueError( "The batch size of the image embeddings and the input points must be the same. ", "Got {} and {} respectively.".format(image_embeddings.shape[0], input_points.shape[0]), " if you want to pass multiple points for the same image, make sure that you passed ", " input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and ", " input_labels of shape (batch_size, point_batch_size, num_points_per_image)", ) sparse_embeddings, dense_embeddings = self.prompt_encoder( batch_size=shape_list(image_embeddings)[0], input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) low_res_masks, iou_predictions, mask_decoder_attentions = self.mask_decoder( image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, output_attentions=output_attentions, ) if not return_dict: output = (iou_predictions, low_res_masks) if output_hidden_states: output = output + (vision_hidden_states,) if output_attentions: output = output + (vision_attentions, mask_decoder_attentions) return output return TFSamImageSegmentationOutput( iou_scores=iou_predictions, pred_masks=low_res_masks, vision_hidden_states=vision_hidden_states, vision_attentions=vision_attentions, mask_decoder_attentions=mask_decoder_attentions, ) def serving_output(self, output: TFSamImageSegmentationOutput) -> TFSamImageSegmentationOutput: hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None return TFSamImageSegmentationOutput( iou_scores=output.iou_scores, pred_masks=output.pred_masks, vision_hidden_states=hs if self.config.output_hidden_states else None, vision_attentions=attns if self.config.output_attentions else None, mask_decoder_attentions=output.mask_decoder_attentions if self.config.output_attentions else None, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "shared_image_embedding", None) is not None: with 
tf.name_scope(self.shared_image_embedding.name): self.shared_image_embedding.build(None) if getattr(self, "vision_encoder", None) is not None: with tf.name_scope(self.vision_encoder.name): self.vision_encoder.build(None) if getattr(self, "prompt_encoder", None) is not None: with tf.name_scope(self.prompt_encoder.name): self.prompt_encoder.build(None) if getattr(self, "mask_decoder", None) is not None: with tf.name_scope(self.mask_decoder.name): self.mask_decoder.build(None)
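
A minimal end-to-end sketch of driving `TFSamModel` through `SamProcessor`, for context. The checkpoint name, image URL and point prompt below are illustrative assumptions rather than anything defined in the file above.

```python
import requests
from PIL import Image

from transformers import SamProcessor, TFSamModel

# Illustrative checkpoint; any SAM checkpoint with TF (or convertible PyTorch) weights works the same way.
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model = TFSamModel.from_pretrained("facebook/sam-vit-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
input_points = [[[450, 600]]]  # one 2D point prompt; the processor pads/batches it into a 4D tensor

inputs = processor(image, input_points=input_points, return_tensors="tf")
outputs = model(**inputs)

# By default three candidate masks are predicted per prompt, ranked by their IoU scores.
print(outputs.iou_scores.shape, outputs.pred_masks.shape)
```

Passing `multimask_output=False` instead returns only the single highest-scoring mask, as described in the docstring above.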
transformers/src/transformers/models/sam/modeling_tf_sam.py/0
{ "file_path": "transformers/src/transformers/models/sam/modeling_tf_sam.py", "repo_id": "transformers", "token_count": 33371 }
359
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SegFormer checkpoints.""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def rename_keys(state_dict, encoder_only=False): new_state_dict = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head"): key = "segformer.encoder." + key if key.startswith("backbone"): key = key.replace("backbone", "segformer.encoder") if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 idx = key[key.find("patch_embed") + len("patch_embed")] key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}") if "norm" in key: key = key.replace("norm", "layer_norm") if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")] key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}") if "layer_norm1" in key: key = key.replace("layer_norm1", "layer_norm_1") if "layer_norm2" in key: key = key.replace("layer_norm2", "layer_norm_2") if "block" in key: # replace for example block1 by block.0 idx = key[key.find("block") + len("block")] key = key.replace(f"block{idx}", f"block.{int(idx)-1}") if "attn.q" in key: key = key.replace("attn.q", "attention.self.query") if "attn.proj" in key: key = key.replace("attn.proj", "attention.output.dense") if "attn" in key: key = key.replace("attn", "attention.self") if "fc1" in key: key = key.replace("fc1", "dense1") if "fc2" in key: key = key.replace("fc2", "dense2") if "linear_pred" in key: key = key.replace("linear_pred", "classifier") if "linear_fuse" in key: key = key.replace("linear_fuse.conv", "linear_fuse") key = key.replace("linear_fuse.bn", "batch_norm") if "linear_c" in key: # replace for example linear_c4 by linear_c.3 idx = key[key.find("linear_c") + len("linear_c")] key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}") if key.startswith("head"): key = key.replace("head", "classifier") new_state_dict[key] = value return new_state_dict def read_in_k_v(state_dict, config): # for each of the encoder blocks: for i in range(config.num_encoder_blocks): for j in range(config.depths[i]): # read in weights + bias of keys and values (which is a single matrix in the original implementation) kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight") kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias") # next, add keys and values (in that order) to the state dict state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = 
kv_weight[ : config.hidden_sizes[i], : ] state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]] state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[ config.hidden_sizes[i] :, : ] state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[ config.hidden_sizes[i] : ] # We will verify our results on a COCO image def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @torch.no_grad() def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our SegFormer structure. """ # load default SegFormer configuration config = SegformerConfig() encoder_only = False # set attributes based on model_name repo_id = "huggingface/label-files" if "segformer" in model_name: size = model_name[len("segformer.") : len("segformer.") + 2] if "ade" in model_name: config.num_labels = 150 filename = "ade20k-id2label.json" expected_shape = (1, 150, 128, 128) elif "city" in model_name: config.num_labels = 19 filename = "cityscapes-id2label.json" expected_shape = (1, 19, 128, 128) else: raise ValueError(f"Model {model_name} not supported") elif "mit" in model_name: encoder_only = True size = model_name[4:6] config.num_labels = 1000 filename = "imagenet-1k-id2label.json" expected_shape = (1, 1000) else: raise ValueError(f"Model {model_name} not supported") # set config attributes id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} if size == "b0": pass elif size == "b1": config.hidden_sizes = [64, 128, 320, 512] config.decoder_hidden_size = 256 elif size == "b2": config.hidden_sizes = [64, 128, 320, 512] config.decoder_hidden_size = 768 config.depths = [3, 4, 6, 3] elif size == "b3": config.hidden_sizes = [64, 128, 320, 512] config.decoder_hidden_size = 768 config.depths = [3, 4, 18, 3] elif size == "b4": config.hidden_sizes = [64, 128, 320, 512] config.decoder_hidden_size = 768 config.depths = [3, 8, 27, 3] elif size == "b5": config.hidden_sizes = [64, 128, 320, 512] config.decoder_hidden_size = 768 config.depths = [3, 6, 40, 3] else: raise ValueError(f"Size {size} not supported") # load image processor (only resize + normalize) image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) # prepare image image = prepare_img() pixel_values = image_processor(images=image, return_tensors="pt").pixel_values logger.info(f"Converting model {model_name}...") # load original state dict if encoder_only: state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu")) else: state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"] # rename keys state_dict = rename_keys(state_dict, encoder_only=encoder_only) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(state_dict, config) # create HuggingFace model and load state dict if encoder_only: config.reshape_last_stage = False model = SegformerForImageClassification(config) else: model = SegformerForSemanticSegmentation(config) model.load_state_dict(state_dict) model.eval() # forward pass outputs = model(pixel_values) logits = 
outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": expected_slice = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": expected_slice = torch.tensor( [ [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]], [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]], [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": expected_slice = torch.tensor( [ [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]], [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]], [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": expected_slice = torch.tensor( [ [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]], [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]], [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": expected_slice = torch.tensor( [ [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]], [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]], [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": expected_slice = torch.tensor( [ [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]], [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]], [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]], [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]], [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": expected_slice = torch.tensor( [ [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]], [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]], [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": expected_slice = torch.tensor( [ [ [-1.1372e01, -1.2787e01, -1.3477e01], [-1.2536e01, -1.4194e01, -1.4409e01], [-1.3217e01, -1.4888e01, -1.5327e01], ], [ [-1.4791e01, -1.7122e01, -1.8277e01], [-1.7163e01, -1.9192e01, -1.9533e01], [-1.7897e01, -1.9991e01, -2.0315e01], ], [ [7.6723e-01, 4.1921e-01, -7.7878e-02], [4.7772e-01, 9.5557e-03, -2.8082e-01], [3.6032e-01, -2.4826e-01, 
-5.1168e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": expected_slice = torch.tensor( [ [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]], [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]], [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]], [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]], [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]], [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]], [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]], [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]], [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]], [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]], [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]], ] ) else: predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2) # finally, save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", default="segformer.b0.512x512.ade.160k", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) args = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
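
As a usage sketch (the paths are placeholders, not real checkpoints), the converter can also be invoked programmatically instead of through the CLI defined in the `argparse` block above:

```python
from transformers.models.segformer.convert_segformer_original_to_pytorch import convert_segformer_checkpoint

# Equivalent to:
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path /path/to/segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-ade
convert_segformer_checkpoint(
    model_name="segformer.b0.512x512.ade.160k",
    checkpoint_path="/path/to/segformer.b0.512x512.ade.160k.pth",  # placeholder path to the original .pth file
    pytorch_dump_folder_path="./segformer-b0-ade",  # placeholder output folder
)
```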
transformers/src/transformers/models/segformer/convert_segformer_original_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/segformer/convert_segformer_original_to_pytorch.py", "repo_id": "transformers", "token_count": 8906 }
360
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for SigLIP.""" from typing import Dict, List, Optional, Union from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): import PIL class SiglipImageProcessor(BaseImageProcessor): r""" Constructs a SigLIP image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image by the specified mean and standard deviation. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 224, "width": 224} image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
- Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, param_name="size", default_to_square=False) resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: height, width = size["height"], size["width"] images = [ resize(image=image, size=(height, width), resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
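
A short sketch of the processor in action; the dummy array below stands in for a real image and is purely illustrative.

```python
import numpy as np

from transformers import SiglipImageProcessor

# Defaults defined above: resize to 224x224, rescale to [0, 1], then normalize with mean/std of 0.5.
image_processor = SiglipImageProcessor()

# Dummy (height, width, channels) uint8 image; a PIL.Image or a list of images works the same way.
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)

inputs = image_processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 224, 224), channels-first by default
```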
transformers/src/transformers/models/siglip/image_processing_siglip.py/0
{ "file_path": "transformers/src/transformers/models/siglip/image_processing_siglip.py", "repo_id": "transformers", "token_count": 4605 }
361
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech processor class for Speech2Text """ import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class Speech2TextProcessor(ProcessorMixin): r""" Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor. [`Speech2TextProcessor`] offers all the functionalities of [`Speech2TextFeatureExtractor`] and [`Speech2TextTokenizer`]. See the [`~Speech2TextProcessor.__call__`] and [`~Speech2TextProcessor.decode`] for more information. Args: feature_extractor (`Speech2TextFeatureExtractor`): An instance of [`Speech2TextFeatureExtractor`]. The feature extractor is a required input. tokenizer (`Speech2TextTokenizer`): An instance of [`Speech2TextTokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "Speech2TextFeatureExtractor" tokenizer_class = "Speech2TextTokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def __call__(self, *args, **kwargs): """ When used in normal mode, this method forwards all its arguments to Speech2TextFeatureExtractor's [`~Speech2TextFeatureExtractor.__call__`] and returns its output. If used in the context [`~Speech2TextProcessor.as_target_processor`] this method forwards all its arguments to Speech2TextTokenizer's [`~Speech2TextTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information. """ # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.") audio = kwargs.pop("raw_speech") else: audio = kwargs.pop("audio", None) sampling_rate = kwargs.pop("sampling_rate", None) text = kwargs.pop("text", None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. 
""" return self.tokenizer.decode(*args, **kwargs) @contextmanager def as_target_processor(self): """ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Speech2Text. """ warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.feature_extractor self._in_target_context_manager = False
transformers/src/transformers/models/speech_to_text/processing_speech_to_text.py/0
{ "file_path": "transformers/src/transformers/models/speech_to_text/processing_speech_to_text.py", "repo_id": "transformers", "token_count": 1792 }
362
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = { "configuration_splinter": ["SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SplinterConfig"], "tokenization_splinter": ["SplinterTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_splinter_fast"] = ["SplinterTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_splinter"] = [ "SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST", "SplinterForQuestionAnswering", "SplinterForPreTraining", "SplinterLayer", "SplinterModel", "SplinterPreTrainedModel", ] if TYPE_CHECKING: from .configuration_splinter import SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP, SplinterConfig from .tokenization_splinter import SplinterTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_splinter_fast import SplinterTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_splinter import ( SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST, SplinterForPreTraining, SplinterForQuestionAnswering, SplinterLayer, SplinterModel, SplinterPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
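
Because of the lazy structure above, everything is imported from the top-level `transformers` namespace and the torch / tokenizers branches are only resolved when actually used; a small sketch (the checkpoint name is illustrative):

```python
from transformers import SplinterConfig, SplinterForQuestionAnswering, SplinterTokenizerFast

# Instantiating from a config only needs the torch branch registered above.
config = SplinterConfig()
model = SplinterForQuestionAnswering(config)

# The fast tokenizer is only available when the `tokenizers` backend is installed.
tokenizer = SplinterTokenizerFast.from_pretrained("tau/splinter-base")
```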
transformers/src/transformers/models/splinter/__init__.py/0
{ "file_path": "transformers/src/transformers/models/splinter/__init__.py", "repo_id": "transformers", "token_count": 927 }
363
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Swin SimMIM checkpoints from the original repository. URL: https://github.com/microsoft/Swin-Transformer/blob/main/MODELHUB.md#simmim-pretrained-swin-v1-models""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def get_swin_config(model_name): config = SwinConfig(image_size=192) if "base" in model_name: window_size = 6 embed_dim = 128 depths = (2, 2, 18, 2) num_heads = (4, 8, 16, 32) elif "large" in model_name: window_size = 12 embed_dim = 192 depths = (2, 2, 18, 2) num_heads = (6, 12, 24, 48) else: raise ValueError("Model not supported, only supports base and large variants") config.window_size = window_size config.embed_dim = embed_dim config.depths = depths config.num_heads = num_heads return config def rename_key(name): if "encoder.mask_token" in name: name = name.replace("encoder.mask_token", "embeddings.mask_token") if "encoder.patch_embed.proj" in name: name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection") if "encoder.patch_embed.norm" in name: name = name.replace("encoder.patch_embed.norm", "embeddings.norm") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if name == "encoder.norm.weight": name = "layernorm.weight" if name == "encoder.norm.bias": name = "layernorm.bias" if "decoder" in name: pass else: name = "swin." 
+ name return name def convert_state_dict(orig_state_dict, model): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "attn_mask" in key: pass elif "qkv" in key: key_split = key.split(".") layer_num = int(key_split[2]) block_num = int(key_split[4]) dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: orig_state_dict[ f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight" ] = val[:dim, :] orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[ dim : dim * 2, : ] orig_state_dict[ f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight" ] = val[-dim:, :] else: orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[ :dim ] orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[ dim : dim * 2 ] orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[ -dim: ] else: orig_state_dict[rename_key(key)] = val return orig_state_dict def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub): state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] config = get_swin_config(model_name) model = SwinForMaskedImageModeling(config) model.eval() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) url = "http://images.cocodataset.org/val2017/000000039769.jpg" image_processor = ViTImageProcessor(size={"height": 192, "width": 192}) image = Image.open(requests.get(url, stream=True).raw) inputs = image_processor(images=image, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs).logits print(outputs.keys()) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and image processor for {model_name} to hub") model.push_to_hub(f"microsoft/{model_name}") image_processor.push_to_hub(f"microsoft/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="swin-base-simmim-window6-192", type=str, choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"], help="Name of the Swin SimMIM model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth", type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
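
Once a checkpoint has been converted (and pushed with `--push_to_hub`), it loads like any other masked-image-modeling model. In the sketch below the repo name follows the `microsoft/{model_name}` pattern used by the script, and the random tensor is a stand-in for a real 192x192 image.

```python
import torch

from transformers import SwinForMaskedImageModeling

model = SwinForMaskedImageModeling.from_pretrained("microsoft/swin-base-simmim-window6-192")

pixel_values = torch.randn(1, 3, 192, 192)  # dummy batch at the 192x192 SimMIM resolution
with torch.no_grad():
    outputs = model(pixel_values)

print(outputs.reconstruction.shape)  # reconstructed pixel values, (1, 3, 192, 192)
```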
transformers/src/transformers/models/swin/convert_swin_simmim_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/swin/convert_swin_simmim_to_pytorch.py", "repo_id": "transformers", "token_count": 2896 }
364
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SwitchTransformersX checkpoints from the original repository to JAX/FLAX model.""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from t5x import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument MOE_LAYER_NAME_MAPPING = { "/attention/": "/0/SelfAttention/", "/self_attention/": "/0/SelfAttention/", "/encoder_decoder_attention/": "/1/EncDecAttention/", "value": "v", "query": "q", "key": "k", "out": "o", "pre_self_attention_layer_norm": "0/layer_norm", "pre_cross_attention_layer_norm": "1/layer_norm", "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong "token_embedder": "shared", "encoder_norm": "final_layer_norm", "decoder_norm": "final_layer_norm", "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight", "router/router_weights/w/": "router/classifier/", "roer/roer_weights/w/": "router/classifier/", "logits_dense": "lm_head", } def rename_keys(s_dict): # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model keys = list(s_dict.keys()) for key in keys: layer_to_block_of_layer = r".*/layers_(\d+)" new_key = key if re.match(layer_to_block_of_layer, key): new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key) layer_to_block_of_layer = r"(encoder|decoder)\/" if re.match(layer_to_block_of_layer, key): groups = re.match(layer_to_block_of_layer, new_key).groups() if groups[0] == "encoder": new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key) new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key) elif groups[0] == "decoder": new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key) new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: new_key = new_key.replace(old_key, temp_key) print(f"{key} -> {new_key}") s_dict[new_key] = s_dict.pop(key) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys()): if "expert" in key: num_experts = s_dict[key].shape[0] expert_weihts = s_dict[key] for idx in range(num_experts): s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weihts[idx] print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}") s_dict.pop(key) return s_dict GIN_TO_CONFIG_MAPPING = { "NUM_ENCODER_LAYERS": "num_layers", "NUM_DECODER_LAYERS": "num_decoder_layers", "NUM_HEADS": "num_heads", "HEAD_DIM": "d_kv", "EMBED_DIM": "d_model", "MLP_DIM": "d_ff", "NUM_SELECTED_EXPERTS": "num_selected_experts", "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers", "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers", "dense.MlpBlock.activations": "feed_forward_proj", } def convert_gin_to_config(gin_file, num_experts): # Convert a google style config to the hugging face fromat import regex as re with open(gin_file, "r") as f: raw_gin = f.read() regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin) args = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value) activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0] args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1]) args["num_experts"] = num_experts config = SwitchTransformersConfig(**args) return config def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8 ): # Initialise PyTorch model print(f"Loading flax weights from : {flax_checkpoint_path}") flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path) if gin_file is not None: config = convert_gin_to_config(gin_file, num_experts) else: config = SwitchTransformersConfig.from_pretrained(config_file) pt_model = SwitchTransformersForConditionalGeneration(config) flax_params = flax_params["target"] flax_params = flatten_dict(flax_params, sep="/") flax_params = rename_keys(flax_params) flax_params = unflatten_dict(flax_params, sep="/") # Load the flax params in the PT model load_flax_weights_in_pytorch_model(pt_model, flax_params) print(f"Save PyTorch model to {pytorch_dump_path}") pt_model.save_pretrained(pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the" " model architecture. If not provided, a `gin_file` has to be provided." ), ) parser.add_argument( "--gin_file", default=None, type=str, required=False, help="Path to the gin config file. If not provided, a `config_file` has to be passed ", ) parser.add_argument( "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model." ) parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts") args = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_t5x_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
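
A programmatic invocation sketch of the converter above. It assumes `t5x` and `flax` are installed; the checkpoint path is a placeholder, and `google/switch-base-8` is used as an existing config name instead of a gin file.

```python
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    convert_flax_checkpoint_to_pytorch,
)

convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path="/path/to/t5x/switch_base_8/checkpoint_500000",  # placeholder T5X checkpoint directory
    config_file="google/switch-base-8",  # reuse an existing config instead of parsing a gin file
    gin_file=None,
    pytorch_dump_path="./switch-base-8-converted",
    num_experts=8,
)
```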
transformers/src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 3257 }
365
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Configuration for Backbone models""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class TimmBackboneConfig(PretrainedConfig): r""" This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`]. It is used to instantiate a timm backbone model according to the specified arguments, defining the model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone (`str`, *optional*): The timm checkpoint to load. num_channels (`int`, *optional*, defaults to 3): The number of input channels. features_only (`bool`, *optional*, defaults to `True`): Whether to output only the features or also the logits. use_pretrained_backbone (`bool`, *optional*, defaults to `True`): Whether to use a pretrained backbone. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). Will default to the last stage if unset. freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`): Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. Example: ```python >>> from transformers import TimmBackboneConfig, TimmBackbone >>> # Initializing a timm backbone >>> configuration = TimmBackboneConfig("resnet50") >>> # Initializing a model from the configuration >>> model = TimmBackbone(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "timm_backbone" def __init__( self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, freeze_batch_norm_2d=False, **kwargs, ): super().__init__(**kwargs) self.backbone = backbone self.num_channels = num_channels self.features_only = features_only self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = True self.out_indices = out_indices if out_indices is not None else (-1,) self.freeze_batch_norm_2d = freeze_batch_norm_2d
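
A brief sketch of how this configuration is typically paired with `TimmBackbone` to pull multi-scale feature maps. It requires `timm` to be installed, and the random tensor is a stand-in for real pixel values.

```python
import torch

from transformers import TimmBackbone, TimmBackboneConfig

# Stages 1-4 of a timm ResNet-50 give feature maps at strides 4, 8, 16 and 32.
config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
model = TimmBackbone(config)

pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values)

for feature_map in outputs.feature_maps:
    print(feature_map.shape)
```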
transformers/src/transformers/models/timm_backbone/configuration_timm_backbone.py/0
{ "file_path": "transformers/src/transformers/models/timm_backbone/configuration_timm_backbone.py", "repo_id": "transformers", "token_count": 1106 }
366
# coding=utf-8 # Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License=, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing=, software # distributed under the License is distributed on an "AS IS" BASIS=, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch TVP Model""" import math from dataclasses import dataclass from typing import Optional, Tuple import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import prune_linear_layer from ...utils import logging from ...utils.backbone_utils import load_backbone from .configuration_tvp import TvpConfig logger = logging.get_logger(__name__) TVP_PRETRAINED_MODEL_ARCHIVE_LIST = [ "Intel/tvp-base", "Intel/tvp-base-ANet", # See all Tvp models at https://huggingface.co/models?filter=tvp ] @dataclass class TvpVideoGroundingOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Temporal-Distance IoU loss for video grounding. logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Contains start_time/duration and end_time/duration. It is the time slot of the videos corresponding to the input texts. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None class TvpLoss(nn.Module): """ This class computes the losses for `TvpForVideoGrounding`. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: losses (`List[str]`): List of all the losses to be applied. """ def __init__(self, losses): super().__init__() self.loss_map = { "iou": self.loss_iou, "distance": self.loss_distance, "duration": self.loss_duration, } for loss in losses: if loss not in self.loss_map: raise ValueError(f"Loss {loss} not supported") self.losses = losses def loss_iou(self, start_time, end_time, candidates_start_time, candidates_end_time, duration): """ Measure the intersection over union. 
""" inter = torch.min(candidates_end_time, end_time) - torch.max(candidates_start_time, start_time) union = torch.max(candidates_end_time, end_time) - torch.min(candidates_start_time, start_time) iou = 1 - inter.clamp(min=0) / union return iou def loss_distance(self, start_time, end_time, candidates_start_time, candidates_end_time, duration): """ Measure the distance of mid points. """ mid_candidates = torch.div(torch.add(candidates_start_time, candidates_end_time), 2.0) mid_groundtruth = torch.div(torch.add(start_time, end_time), 2.0) distance_diff = torch.div( torch.max(mid_candidates, mid_groundtruth) - torch.min(mid_candidates, mid_groundtruth), duration ).clamp(min=0.2) return distance_diff def loss_duration(self, start_time, end_time, candidates_start_time, candidates_end_time, duration): """ Measure the difference of duration. """ duration_candidates = torch.sub(candidates_end_time, candidates_start_time) duration_groundtruth = torch.sub(end_time, start_time) duration_diff = torch.square(torch.div(torch.sub(duration_candidates, duration_groundtruth), duration)) duration_diff = duration_diff.clamp(min=0.4) return duration_diff def forward(self, logits, labels): """ This performs the loss computation. Args: logits (`torch.FloatTensor`): The output logits of head module. labels (`List[torch.FloatTensor]`): List of tensors ([start, end, duration]), which contains start time, end time of the video corresponding to the text, and also the duration. """ duration, start_time, end_time = labels candidates = torch.mul(logits, duration) candidates_start_time, candidates_end_time = candidates[:, 0].float(), candidates[:, 1].float() losses_dict = {} for loss in self.losses: losses_dict.update( {loss: self.loss_map[loss](start_time, end_time, candidates_start_time, candidates_end_time, duration)} ) return losses_dict class TvpVisionModel(nn.Module): def __init__(self, config): super().__init__() self.backbone = load_backbone(config) self.grid_encoder_conv = nn.Conv2d( config.backbone_config.hidden_sizes[-1], config.hidden_size, kernel_size=3, stride=1, padding=1, groups=1, bias=False, ) def forward(self, pixel_values): batch_size, num_frames, num_channels, height, width = pixel_values.shape # (batch_size * num_frames, num_channels, height, width) pixel_values = pixel_values.view(batch_size * num_frames, num_channels, height, width) grid_feat_outputs = self.backbone(pixel_values)["feature_maps"][0] grid = self.grid_encoder_conv(grid_feat_outputs) grid = nn.functional.max_pool2d(grid, kernel_size=2, stride=2) grid = nn.functional.relu(grid, inplace=True) new_channel, new_height, new_width = grid.shape[-3:] # (batch_size, num_frames, num_channels, height, width) grid = grid.view(batch_size, num_frames, new_channel, new_height, new_width) # (batch_size, num_frames, height, width, num_channels) grid = grid.permute(0, 1, 3, 4, 2) return grid class TvpVisualInputEmbedding(nn.Module): """ Takes input of both image and video (multi-frame) """ def __init__(self, config): super().__init__() # sequence embedding self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.row_position_embeddings = nn.Embedding(config.max_grid_row_position_embeddings, config.hidden_size) self.col_position_embeddings = nn.Embedding(config.max_grid_col_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(1, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def 
add_2d_positional_embeddings(self, grid): """ Args: grid: (batch_size, height, width, hidden_dim) Returns: grid + col_position_embeddings.view(*col_shape): (batch_size, *, height, width, hidden_dim) """ batch_size, height, width, hidden_dim = grid.shape # add row-wise position embeddings row_position_ids = torch.arange(height, dtype=torch.long, device=grid.device) # (height, ) row_position_embeddings = self.row_position_embeddings(row_position_ids) # (height, hidden_dim) row_shape = (1,) * (len(grid.shape) - 3) + (height, 1, hidden_dim) # (1, height, 1, hidden_dim) grid = grid + row_position_embeddings.view(*row_shape) # broadcast automatically # add column-wise position embeddings col_position_ids = torch.arange(width, dtype=torch.long, device=grid.device) # (width, ) col_position_embeddings = self.col_position_embeddings(col_position_ids) # (width, hidden_dim) col_shape = (batch_size, 1, width, hidden_dim) # (1, 1, width, hidden_dim) return grid + col_position_embeddings.view(*col_shape) # broadcast automatically def forward(self, grid): """ Args: grid: Array of shape (batch_size, num_frames, height, width, num_channels). It contains processed frames extracted from videos, and is generated by Tvp image preprocessor. Note, num_frames can be 1 Returns: embeddings: The embedding of grid with size (batch_size, height*width, num_channels) """ batch_size, num_frames, height, width, num_channels = grid.shape # temporal mean pooling, (batch_size, height, width, hidden_size) grid = grid.mean(1) grid = self.add_2d_positional_embeddings(grid) # image token sequence, (batch_size, height*width, num_channels) visual_tokens = grid.view(batch_size, -1, num_channels) visual_tokens_shape = visual_tokens.shape[:-1] device = visual_tokens.device # image token type embeddings. 
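# All visual tokens share a single token type (id 0); the table above was created as nn.Embedding(1, hidden_size), so the lookup below adds the same learned vector to every patch position.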
token_type_ids = torch.zeros(visual_tokens_shape, dtype=torch.long, device=device) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = visual_tokens + token_type_embeddings embeddings = self.layer_norm(embeddings) embeddings = self.dropout(embeddings) return embeddings class TvpTextInputEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.layer_norm(embeddings) embeddings = self.dropout(embeddings) return embeddings class TvpAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.attn_dropout = nn.Dropout(config.attention_probs_dropout_prob) self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.num_attention_heads, self.attention_head_size) heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.query = prune_linear_layer(self.query, index) self.key = prune_linear_layer(self.key, index) self.value = prune_linear_layer(self.value, index) self.dense = prune_linear_layer(self.dense, index, dim=1) # Update hyper params 
and store pruned heads self.num_attention_heads = self.num_attention_heads - len(heads) self.all_head_size = self.attention_head_size * self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def _reshape(self, tensor: torch.Tensor, sequence_length: int, batch_size: int): return ( tensor.view(batch_size, sequence_length, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) .contiguous() ) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions: Optional[bool] = None, ): batch_size, sequence_length = hidden_states.shape[:2] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self._reshape(mixed_query_layer, sequence_length, batch_size) key_layer = self._reshape(mixed_key_layer, sequence_length, batch_size) value_layer = self._reshape(mixed_value_layer, sequence_length, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.attn_dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask attn_output = torch.matmul(attention_probs, value_layer) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, sequence_length, self.all_head_size) attn_output = self.dense(attn_output) attn_output = self.dropout(attn_output) attn_output = self.layer_norm(attn_output + hidden_states) # add attentions if we output them outputs = (attn_output, attention_probs) if output_attentions else (attn_output,) return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Tvp class TvpIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class TvpOutputLayer(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.layer_norm(hidden_states + input_tensor) return hidden_states class TvpEncodeLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = TvpAttention(config) self.intermediate = TvpIntermediate(config) self.output = TvpOutputLayer(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions: Optional[bool] 
= None, ): self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + outputs return outputs class TvpEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([TvpEncodeLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): return_dict = return_dict if return_dict is not None else self.config.return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) all_hidden_states = () all_attentions = () for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, (head_mask[i] if head_mask is not None else None), output_attentions, ) else: layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i], output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: outputs = (hidden_states,) if output_hidden_states: outputs = outputs + (all_hidden_states,) if output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states if output_hidden_states else None, attentions=all_attentions if output_attentions else None, ) # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Tvp class TvpPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class TvpPreTrainedModel(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = TvpConfig base_model_prefix = "model" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") if module.bias is not None: nn.init.constant_(module.bias, 0) TVP_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`TvpConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TVP_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`TvpImageProcessor`]. See [`TvpImageProcessor.__call__`] for details. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class TvpFrameDownPadPrompter(nn.Module): """ Pad frames extracted from videos only at the bottom. 
""" def __init__(self, config): if config.visual_prompter_apply not in ("add", "replace", "remove"): raise ValueError("`visual_prompter_apply` must be in (add, replace, remove)") super().__init__() self.visual_prompt_size = config.visual_prompt_size self.frame_num = config.frame_num self.max_img_size = config.max_img_size self.visual_prompter_apply = config.visual_prompter_apply self.pad_down = nn.Parameter( torch.randn([1, config.frame_num, 3, config.visual_prompt_size, config.max_img_size]) ) def forward(self, pixel_values): if self.visual_prompter_apply != "add": visual_prompt_mask = torch.ones( [self.max_img_size, self.max_img_size], dtype=pixel_values.dtype, device=pixel_values.device ) visual_prompt_mask[self.max_img_size - self.visual_prompt_size : self.max_img_size, :] = 0.0 pixel_values *= visual_prompt_mask if self.visual_prompter_apply != "remove": prompt = torch.zeros( [pixel_values.shape[0], pixel_values.shape[1], 3, self.max_img_size, self.max_img_size], device=pixel_values.device, ) start_point = self.max_img_size - self.visual_prompt_size prompt[:, :, :, start_point : self.max_img_size, :] = self.pad_down pixel_values += prompt.to(pixel_values.dtype) return pixel_values class TvpFramePadPrompter(nn.Module): """ Pad frames extracted from videos in the surroundings. """ def __init__(self, config): if config.visual_prompter_apply not in ("add", "replace", "remove"): raise ValueError("`visual_prompter_apply` must be in (add, replace, remove)") super().__init__() self.num_frames = config.num_frames self.max_img_size = config.max_img_size self.visual_prompter_apply = config.visual_prompter_apply self.base_size = config.max_img_size - config.visual_prompt_size * 2 self.pad_up = nn.Parameter( torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size]) ) self.pad_down = nn.Parameter( torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size]) ) self.pad_left = nn.Parameter( torch.randn( [ 1, config.num_frames, 3, config.max_img_size - config.visual_prompt_size * 2, config.visual_prompt_size, ] ) ) self.pad_right = nn.Parameter( torch.randn( [ 1, config.num_frames, 3, config.max_img_size - config.visual_prompt_size * 2, config.visual_prompt_size, ] ) ) def forward(self, pixel_values): if self.visual_prompter_apply not in ("add", "remove", "replace"): raise ValueError(f"Invalid visual_prompter_apply value {self.visual_prompter_apply}") if self.visual_prompter_apply in ("replace", "remove"): visual_prompt_mask = torch.ones( [self.max_img_size, self.max_img_size], dtype=pixel_values.dtype, device=pixel_values.device ) pixel_values *= visual_prompt_mask if self.visual_prompter_apply in ("replace", "add"): base = torch.zeros(1, self.num_frames, 3, self.base_size, self.base_size, device=pixel_values.device) prompt = torch.cat([self.pad_left, base, self.pad_right], dim=4) prompt = torch.cat([self.pad_up, prompt, self.pad_down], dim=3) prompt = torch.cat(pixel_values.size(0) * [prompt]) pixel_values += prompt.to(pixel_values.dtype) return pixel_values TVP_PROMPTER_CLASSES_MAPPING = { "framedownpad": TvpFrameDownPadPrompter, "framepad": TvpFramePadPrompter, } @add_start_docstrings( "The bare Tvp Model transformer outputting BaseModelOutputWithPooling object without any specific head on" " top.", TVP_START_DOCSTRING, ) class TvpModel(TvpPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.vision_model = TvpVisionModel(config) self.embeddings = TvpTextInputEmbeddings(config) self.visual_embeddings 
= TvpVisualInputEmbedding(config) self.encoder = TvpEncoder(config) self.pooler = TvpPooler(config) self.text_prompt = nn.Parameter(torch.randn([1, 10, config.hidden_size])) self.dropout = nn.Dropout(config.hidden_dropout_prob) if config.visual_prompter_type not in TVP_PROMPTER_CLASSES_MAPPING: raise ValueError("`visual_prompter_type` must be in (framedownpad, framepad)") self.visual_prompter = TVP_PROMPTER_CLASSES_MAPPING[config.visual_prompter_type](config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(TVP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=TvpConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Returns: Examples: ```python >>> import torch >>> from transformers import AutoConfig, AutoTokenizer, TvpModel >>> model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp") >>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp") >>> pixel_values = torch.rand(1, 1, 3, 448, 448) >>> text_inputs = tokenizer("This is an example input", return_tensors="pt") >>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask) ```""" return_dict = return_dict if return_dict is not None else self.config.return_dict # Add visual prompt, it compensates for the spatiotemporal information loss in 2D visual features. pixel_values = self.vision_model(self.visual_prompter(pixel_values)) # (batch_size, sequence_length, hidden_size) text_embedding_output = self.embeddings(input_ids=input_ids) # (batch_size, visual_sequence_length, hidden_size) visual_embedding_output = self.visual_embeddings(pixel_values) if attention_mask is not None: # (batch_size, visual_sequence_length) visual_attention_mask = attention_mask.new_ones(visual_embedding_output.shape[:2]) pt_mask = torch.ones(attention_mask.shape[0], 10).to( device=attention_mask.device, dtype=attention_mask.dtype ) attention_mask = torch.cat([pt_mask, attention_mask, visual_attention_mask], dim=-1) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
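# At this point attention_mask covers [10 learnable text-prompt positions] + [text tokens] + [visual tokens]; get_extended_attention_mask below expands it into the additive 4D mask consumed by the encoder.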
attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.size()).to(input_ids.device) text_prompt = self.text_prompt.expand(text_embedding_output.shape[0], -1, -1) # (batch_size, sequence_length + visual_sequence_length, hidden_size) embedding_output = torch.cat([text_prompt, text_embedding_output, visual_embedding_output], dim=1) encoder_outputs = self.encoder( embedding_output, attention_mask=attention_mask, head_mask=self.get_head_mask(head_mask, self.config.num_hidden_layers), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs.last_hidden_state if return_dict else encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) last_hidden_state = self.dropout(last_hidden_state) pooled_output = self.dropout(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TvpVideoGroundingHead(nn.Module): def __init__(self, config): super().__init__() self.layer_0 = nn.Linear(config.hidden_size, config.hidden_size * 2) self.layer_1 = nn.Linear(config.hidden_size * 2, 2) self.activation_0 = nn.ReLU() self.activation_1 = nn.Sigmoid() def forward(self, pooler_output): logits = self.activation_0(self.layer_0(pooler_output)) logits = self.activation_1(self.layer_1(logits)) return logits @add_start_docstrings( """ Tvp Model with a video grounding head on top computing IoU, distance, and duration loss. """, TVP_START_DOCSTRING, ) class TvpForVideoGrounding(TvpPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.model = TvpModel(config) self.video_grounding_head = TvpVideoGroundingHead(config) self.post_init() @add_start_docstrings_to_model_forward(TVP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TvpVideoGroundingOutput, config_class=TvpConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, labels: Tuple[torch.Tensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" labels (`torch.FloatTensor` of shape `(batch_size, 3)`, *optional*): The labels contains duration, start time, and end time of the video corresponding to the text. 
Returns: Examples: ```python >>> import torch >>> from transformers import AutoConfig, AutoTokenizer, TvpForVideoGrounding >>> model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp") >>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp") >>> pixel_values = torch.rand(1, 1, 3, 448, 448) >>> text_inputs = tokenizer("This is an example input", return_tensors="pt") >>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask) ```""" return_dict = return_dict if return_dict is not None else self.config.return_dict outputs = self.model( input_ids, pixel_values, attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooler_output = outputs[1] logits = self.video_grounding_head(pooler_output) loss = None if labels is not None: criterion = TvpLoss(["iou", "distance", "duration"]) criterion.to(self.device) loss_dict = criterion(logits, labels) loss = ( loss_dict["iou"] + self.config.distance_loss_weight * loss_dict["distance"] + self.config.duration_loss_weight * loss_dict["duration"] ) if not return_dict: outputs = (logits,) + outputs[2:] if loss is not None: outputs = (loss,) + outputs return outputs return TvpVideoGroundingOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
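Since `TvpVideoGroundingHead` ends with a sigmoid, the two logits are the predicted start and end positions normalized by the clip duration (see `TvpVideoGroundingOutput` above). A minimal sketch of converting them back to seconds, reusing the tiny checkpoint from the docstring example and assuming a 30-second clip (the duration is not part of the model output):

```python
import torch
from transformers import AutoTokenizer, TvpForVideoGrounding

model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp")
tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp")

pixel_values = torch.rand(1, 1, 3, 448, 448)
text_inputs = tokenizer("This is an example input", return_tensors="pt")
output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask)

# logits hold (start_time/duration, end_time/duration) in [0, 1] thanks to the sigmoid head.
video_duration = 30.0  # assumed clip length in seconds
start_s, end_s = (output.logits[0] * video_duration).tolist()
print(f"predicted segment: {start_s:.2f}s to {end_s:.2f}s")
```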
transformers/src/transformers/models/tvp/modeling_tvp.py/0
{ "file_path": "transformers/src/transformers/models/tvp/modeling_tvp.py", "repo_id": "transformers", "token_count": 16441 }
367
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ UnivNetModel model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { "dg845/univnet-dev": "https://huggingface.co/dg845/univnet-dev/resolve/main/config.json", } class UnivNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`UnivNetModel`]. It is used to instantiate a UnivNet vocoder model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UnivNet [dg845/univnet-dev](https://huggingface.co/dg845/univnet-dev) architecture, which corresponds to the 'c32' architecture in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/master/config/default_c32.yaml). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: model_in_channels (`int`, *optional*, defaults to 64): The number of input channels for the UnivNet residual network. This should correspond to `noise_sequence.shape[1]` and the value used in the [`UnivNetFeatureExtractor`] class. model_hidden_channels (`int`, *optional*, defaults to 32): The number of hidden channels of each residual block in the UnivNet residual network. num_mel_bins (`int`, *optional*, defaults to 100): The number of frequency bins in the conditioning log-mel spectrogram. This should correspond to the value used in the [`UnivNetFeatureExtractor`] class. resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 3, 3]`): A tuple of integers defining the kernel sizes of the 1D convolutional layers in the UnivNet residual network. The length of `resblock_kernel_sizes` defines the number of resnet blocks and should match that of `resblock_stride_sizes` and `resblock_dilation_sizes`. resblock_stride_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 4]`): A tuple of integers defining the stride sizes of the 1D convolutional layers in the UnivNet residual network. The length of `resblock_stride_sizes` should match that of `resblock_kernel_sizes` and `resblock_dilation_sizes`. resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]]`): A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the UnivNet residual network. The length of `resblock_dilation_sizes` should match that of `resblock_kernel_sizes` and `resblock_stride_sizes`. The length of each nested list in `resblock_dilation_sizes` defines the number of convolutional layers per resnet block. 
kernel_predictor_num_blocks (`int`, *optional*, defaults to 3): The number of residual blocks in the kernel predictor network, which calculates the kernel and bias for each location variable convolution layer in the UnivNet residual network. kernel_predictor_hidden_channels (`int`, *optional*, defaults to 64): The number of hidden channels for each residual block in the kernel predictor network. kernel_predictor_conv_size (`int`, *optional*, defaults to 3): The kernel size of each 1D convolutional layer in the kernel predictor network. kernel_predictor_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for each residual block in the kernel predictor network. initializer_range (`float`, *optional*, defaults to 0.01): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. leaky_relu_slope (`float`, *optional*, defaults to 0.2): The angle of the negative slope used by the leaky ReLU activation. Example: ```python >>> from transformers import UnivNetModel, UnivNetConfig >>> # Initializing a Tortoise TTS style configuration >>> configuration = UnivNetConfig() >>> # Initializing a model (with random weights) from the Tortoise TTS style configuration >>> model = UnivNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "univnet" def __init__( self, model_in_channels=64, model_hidden_channels=32, num_mel_bins=100, resblock_kernel_sizes=[3, 3, 3], resblock_stride_sizes=[8, 8, 4], resblock_dilation_sizes=[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]], kernel_predictor_num_blocks=3, kernel_predictor_hidden_channels=64, kernel_predictor_conv_size=3, kernel_predictor_dropout=0.0, initializer_range=0.01, leaky_relu_slope=0.2, **kwargs, ): if not (len(resblock_kernel_sizes) == len(resblock_stride_sizes) == len(resblock_dilation_sizes)): raise ValueError( "`resblock_kernel_sizes`, `resblock_stride_sizes`, and `resblock_dilation_sizes` must all have the" " same length (which will be the number of resnet blocks in the model)." ) self.model_in_channels = model_in_channels self.model_hidden_channels = model_hidden_channels self.num_mel_bins = num_mel_bins self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_stride_sizes = resblock_stride_sizes self.resblock_dilation_sizes = resblock_dilation_sizes self.kernel_predictor_num_blocks = kernel_predictor_num_blocks self.kernel_predictor_hidden_channels = kernel_predictor_hidden_channels self.kernel_predictor_conv_size = kernel_predictor_conv_size self.kernel_predictor_dropout = kernel_predictor_dropout self.initializer_range = initializer_range self.leaky_relu_slope = leaky_relu_slope super().__init__(**kwargs)
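A quick sketch of the length check at the top of `__init__`: the three resblock lists must line up one-to-one with the resnet blocks, so a mismatched call fails fast, while the default call reproduces the 'c32'-style configuration described in the docstring.

```python
from transformers import UnivNetConfig, UnivNetModel

# Default configuration, equivalent to the 'c32' architecture described above.
config = UnivNetConfig()
model = UnivNetModel(config)

# resblock_kernel_sizes, resblock_stride_sizes and resblock_dilation_sizes must all
# have the same length; otherwise __init__ raises a ValueError.
try:
    UnivNetConfig(resblock_kernel_sizes=[3, 3], resblock_stride_sizes=[8, 8, 4])
except ValueError as err:
    print(err)
```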
transformers/src/transformers/models/univnet/configuration_univnet.py/0
{ "file_path": "transformers/src/transformers/models/univnet/configuration_univnet.py", "repo_id": "transformers", "token_count": 2525 }
368
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ViLT model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) VILT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "dandelin/vilt-b32-mlm": "https://huggingface.co/dandelin/vilt-b32-mlm/blob/main/config.json" } class ViltConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ViltModel`]. It is used to instantiate a ViLT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ViLT [dandelin/vilt-b32-mlm](https://huggingface.co/dandelin/vilt-b32-mlm) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the text part of the model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`ViltModel`]. type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`ViltModel`]. This is used when encoding text. modality_type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the modalities passed when calling [`ViltModel`]. This is used after concatenating the embeddings of the text and image modalities. max_position_embeddings (`int`, *optional*, defaults to 40): The maximum sequence length that this model might ever be used with. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
image_size (`int`, *optional*, defaults to 384): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. max_image_length (`int`, *optional*, defaults to -1): The maximum number of patches to take as input for the Transformer encoder. If set to a positive integer, the encoder will sample `max_image_length` patches at maximum. If set to -1, will not be taken into account. num_images (`int`, *optional*, defaults to -1): The number of images to use for natural language visual reasoning. If set to a positive integer, will be used by [`ViltForImagesAndTextClassification`] for defining the classifier head. Example: ```python >>> from transformers import ViltModel, ViltConfig >>> # Initializing a ViLT dandelin/vilt-b32-mlm style configuration >>> configuration = ViltConfig() >>> # Initializing a model from the dandelin/vilt-b32-mlm style configuration >>> model = ViltModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vilt" def __init__( self, vocab_size=30522, type_vocab_size=2, modality_type_vocab_size=2, max_position_embeddings=40, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=32, num_channels=3, qkv_bias=True, max_image_length=-1, tie_word_embeddings=False, num_images=-1, **kwargs, ): super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) self.vocab_size = vocab_size self.type_vocab_size = type_vocab_size self.modality_type_vocab_size = modality_type_vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.max_image_length = max_image_length self.num_images = num_images
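A small illustration of the `num_images` setting described above; the two-image value mirrors natural language visual reasoning setups and is only meant as an example, not a recommended checkpoint configuration.

```python
from transformers import ViltConfig, ViltModel

# Defaults roughly matching dandelin/vilt-b32-mlm.
configuration = ViltConfig()
model = ViltModel(configuration)

# For tasks that pair two images with one sentence (natural language visual reasoning),
# num_images tells ViltForImagesAndTextClassification how many images to expect.
two_image_config = ViltConfig(num_images=2)
print(two_image_config.num_images, two_image_config.max_position_embeddings)  # 2 40
```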
transformers/src/transformers/models/vilt/configuration_vilt.py/0
{ "file_path": "transformers/src/transformers/models/vilt/configuration_vilt.py", "repo_id": "transformers", "token_count": 2640 }
369
# coding=utf-8 # Copyright The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ VisionTextDualEncoder model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig from ..clip.configuration_clip import CLIPVisionConfig logger = logging.get_logger(__name__) class VisionTextDualEncoderConfig(PretrainedConfig): r""" [`VisionTextDualEncoderConfig`] is the configuration class to store the configuration of a [`VisionTextDualEncoderModel`]. It is used to instantiate a [`VisionTextDualEncoderModel`] according to the specified arguments, defining the text model and vision model configs. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation. kwargs (*optional*): Dictionary of keyword arguments. Examples: ```python >>> from transformers import ViTConfig, BertConfig, VisionTextDualEncoderConfig, VisionTextDualEncoderModel >>> # Initializing a BERT and ViT configuration >>> config_vision = ViTConfig() >>> config_text = BertConfig() >>> config = VisionTextDualEncoderConfig.from_vision_text_configs(config_vision, config_text, projection_dim=512) >>> # Initializing a BERT and ViT model (with random weights) >>> model = VisionTextDualEncoderModel(config=config) >>> # Accessing the model configuration >>> config_vision = model.config.vision_config >>> config_text = model.config.text_config >>> # Saving the model, including its configuration >>> model.save_pretrained("vit-bert") >>> # loading model and config from pretrained folder >>> vision_text_config = VisionTextDualEncoderConfig.from_pretrained("vit-bert") >>> model = VisionTextDualEncoderModel.from_pretrained("vit-bert", config=vision_text_config) ```""" model_type = "vision-text-dual-encoder" is_composition = True def __init__(self, projection_dim=512, logit_scale_init_value=2.6592, **kwargs): super().__init__(**kwargs) if "vision_config" not in kwargs: raise ValueError("`vision_config` can not be `None`.") if "text_config" not in kwargs: raise ValueError("`text_config` can not be `None`.") vision_config = kwargs.pop("vision_config") text_config = kwargs.pop("text_config") vision_model_type = vision_config.pop("model_type") text_model_type = text_config.pop("model_type") if vision_model_type == "clip": self.vision_config = AutoConfig.for_model(vision_model_type, **vision_config).vision_config elif vision_model_type == "clip_vision_model": self.vision_config = CLIPVisionConfig(**vision_config) else: self.vision_config = AutoConfig.for_model(vision_model_type, **vision_config) self.text_config = AutoConfig.for_model(text_model_type, 
**text_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value @classmethod def from_vision_text_configs(cls, vision_config: PretrainedConfig, text_config: PretrainedConfig, **kwargs): r""" Instantiate a [`VisionTextDualEncoderConfig`] (or a derived class) from text model configuration and vision model configuration. Returns: [`VisionTextDualEncoderConfig`]: An instance of a configuration object """ return cls(vision_config=vision_config.to_dict(), text_config=text_config.to_dict(), **kwargs)
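The `__init__` above dispatches on the vision config's `model_type`; a short sketch of the `clip_vision_model` branch, where the nested config is rebuilt directly as a `CLIPVisionConfig` rather than through `AutoConfig`:

```python
from transformers import BertConfig, CLIPVisionConfig, VisionTextDualEncoderConfig

vision_config = CLIPVisionConfig()  # model_type == "clip_vision_model"
text_config = BertConfig()

config = VisionTextDualEncoderConfig.from_vision_text_configs(
    vision_config, text_config, projection_dim=256
)
print(type(config.vision_config).__name__)  # CLIPVisionConfig
print(type(config.text_config).__name__)  # BertConfig
print(config.projection_dim)  # 256
```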
transformers/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py/0
{ "file_path": "transformers/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py", "repo_id": "transformers", "token_count": 1524 }
370
# coding=utf-8 # Copyright 2021 Google AI, Ross Wightman, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 ViT model.""" from __future__ import annotations import collections.abc import math from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_vit import ViTConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "ViTConfig" # Base docstring _CHECKPOINT_FOR_DOC = "google/vit-base-patch16-224-in21k" _EXPECTED_OUTPUT_SHAPE = [1, 197, 768] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "google/vit-base-patch16-224" _IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat" class TFViTEmbeddings(keras.layers.Layer): """ Construct the CLS token, position and patch embeddings. """ def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.patch_embeddings = TFViTPatchEmbeddings(config, name="patch_embeddings") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def build(self, input_shape=None): num_patches = self.patch_embeddings.num_patches self.cls_token = self.add_weight( shape=(1, 1, self.config.hidden_size), initializer=get_initializer(self.config.initializer_range), trainable=True, name="cls_token", ) self.position_embeddings = self.add_weight( shape=(1, num_patches + 1, self.config.hidden_size), initializer=get_initializer(self.config.initializer_range), trainable=True, name="position_embeddings", ) if self.built: return self.built = True if getattr(self, "patch_embeddings", None) is not None: with tf.name_scope(self.patch_embeddings.name): self.patch_embeddings.build(None) def interpolate_pos_encoding(self, embeddings, height, width) -> tf.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. 
Source: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ batch_size, seq_len, dim = shape_list(embeddings) num_patches = seq_len - 1 _, num_positions, _ = shape_list(self.position_embeddings) num_positions -= 1 if num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] h0 = height // self.config.patch_size w0 = width // self.config.patch_size patch_pos_embed = tf.image.resize( images=tf.reshape( patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) ), size=(h0, w0), method="bicubic", ) shape = shape_list(patch_pos_embed) assert h0 == shape[-3] and w0 == shape[-2] patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim)) return tf.concat(values=(class_pos_embed, patch_pos_embed), axis=1) def call( self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False ) -> tf.Tensor: batch_size, num_channels, height, width = shape_list(pixel_values) embeddings = self.patch_embeddings( pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, training=training ) # add the [CLS] token to the embedded patch tokens cls_tokens = tf.repeat(self.cls_token, repeats=batch_size, axis=0) embeddings = tf.concat((cls_tokens, embeddings), axis=1) # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings, training=training) return embeddings # Based on timm implementation, which can be found here: # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py class TFViTPatchEmbeddings(keras.layers.Layer): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.num_channels = num_channels self.config = config self.projection = keras.layers.Conv2D( filters=hidden_size, kernel_size=patch_size, strides=patch_size, padding="valid", data_format="channels_last", use_bias=True, kernel_initializer=get_initializer(self.config.initializer_range), bias_initializer="zeros", name="projection", ) def call( self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False ) -> tf.Tensor: batch_size, num_channels, height, width = shape_list(pixel_values) if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) if not interpolate_pos_encoding: if tf.executing_eagerly(): if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) projection = self.projection(pixel_values) # Change the 2D spatial dimensions to a single temporal dimension. # shape = (batch_size, num_patches, out_channels=embed_dim) num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0]) embeddings = tf.reshape(tensor=projection, shape=(batch_size, num_patches, -1)) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, None, self.num_channels]) class TFViTSelfAttention(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) mixed_key_layer = self.key(inputs=hidden_states) mixed_value_layer = self.value(inputs=hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. 
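# The raw scores are then divided by sqrt(attention_head_size) so the softmax stays well-scaled as the head size grows (standard scaled dot-product attention).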
# (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) class TFViTSelfOutput(keras.layers.Layer): """ The residual connection is defined in TFViTLayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFViTAttention(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFViTSelfAttention(config, name="attention") self.dense_output = TFViTSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: self_outputs = self.self_attention( hidden_states=input_tensor, head_mask=head_mask, output_attentions=output_attentions, training=training ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "dense_output", None) is not None: 
with tf.name_scope(self.dense_output.name): self.dense_output.build(None) class TFViTIntermediate(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFViTOutput(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = hidden_states + input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) class TFViTLayer(keras.layers.Layer): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.attention = TFViTAttention(config, name="attention") self.intermediate = TFViTIntermediate(config, name="intermediate") self.vit_output = TFViTOutput(config, name="output") self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before") self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after") self.config = config def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: attention_outputs = self.attention( # in ViT, layernorm is applied before self-attention input_tensor=self.layernorm_before(inputs=hidden_states), head_mask=head_mask, output_attentions=output_attentions, training=training, ) attention_output = attention_outputs[0] # first residual connection hidden_states = attention_output + hidden_states # in ViT, layernorm is also applied after self-attention layer_output = self.layernorm_after(inputs=hidden_states) intermediate_output = self.intermediate(hidden_states=layer_output) # second residual connection is done here layer_output = self.vit_output( hidden_states=intermediate_output, input_tensor=hidden_states, training=training ) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with 
tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "vit_output", None) is not None: with tf.name_scope(self.vit_output.name): self.vit_output.build(None) if getattr(self, "layernorm_before", None) is not None: with tf.name_scope(self.layernorm_before.name): self.layernorm_before.build([None, None, self.config.hidden_size]) if getattr(self, "layernorm_after", None) is not None: with tf.name_scope(self.layernorm_after.name): self.layernorm_after.build([None, None, self.config.hidden_size]) class TFViTEncoder(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.layer = [TFViTLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states=hidden_states, head_mask=head_mask[i], output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFViTMainLayer(keras.layers.Layer): config_class = ViTConfig def __init__(self, config: ViTConfig, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFViTEmbeddings(config, name="embeddings") self.encoder = TFViTEncoder(config, name="encoder") self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") self.pooler = TFViTPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, pixel_values: TFModelInputType | None = None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, training=training, ) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(inputs=sequence_output) pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, self.config.hidden_size]) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFViTPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ViTConfig base_model_prefix = "vit" main_input_name = "pixel_values" VIT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. 
Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`ViTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ VIT_INPUTS_DOCSTRING = r""" Args: pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. interpolate_pos_encoding (`bool`, *optional*): Whether to interpolate the pre-trained position encodings. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( "The bare ViT Model transformer outputting raw hidden-states without any specific head on top.", VIT_START_DOCSTRING, ) class TFViTModel(TFViTPreTrainedModel): def __init__(self, config: ViTConfig, *inputs, add_pooling_layer=True, **kwargs): super().__init__(config, *inputs, **kwargs) self.vit = TFViTMainLayer(config, add_pooling_layer=add_pooling_layer, name="vit") @unpack_inputs @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, pixel_values: TFModelInputType | None = None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: outputs = self.vit( pixel_values=pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "vit", None) is not None: with tf.name_scope(self.vit.name): self.vit.build(None) class TFViTPooler(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. <Tip> Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained position embeddings to the higher resolution. 
</Tip> """, VIT_START_DOCSTRING, ) class TFViTForImageClassification(TFViTPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: ViTConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.vit = TFViTMainLayer(config, add_pooling_layer=False, name="vit") # Classifier head self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: TFModelInputType | None = None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.vit( pixel_values=pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.classifier(inputs=sequence_output[:, 0, :]) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "vit", None) is not None: with tf.name_scope(self.vit.name): self.vit.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size])
transformers/src/transformers/models/vit/modeling_tf_vit.py/0
{ "file_path": "transformers/src/transformers/models/vit/modeling_tf_vit.py", "repo_id": "transformers", "token_count": 15720 }
371
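As a quick orientation for the TF ViT classes above, here is a minimal inference sketch. It is not part of the original file; the `google/vit-base-patch16-224` checkpoint name and the random stand-in image are assumptions for illustration only.

import numpy as np
import tensorflow as tf

from transformers import AutoImageProcessor, TFViTForImageClassification

# A stand-in RGB image; in practice this would be a real photo or a PIL.Image.
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

inputs = processor(images=image, return_tensors="tf")  # pixel_values of shape (1, 3, 224, 224)
outputs = model(**inputs)                              # TFSequenceClassifierOutput with .logits
predicted_class = int(tf.math.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[predicted_class])

The classifier reads only the [CLS] token (`sequence_output[:, 0, :]`), which is why `TFViTMainLayer` is built with `add_pooling_layer=False` in the classification head.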
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Vivit.""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) def make_batched(videos) -> List[List[ImageInput]]: if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]): return videos elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]): return [videos] elif is_valid_image(videos): return [[videos]] raise ValueError(f"Could not make batched video from {videos}") class VivitImageProcessor(BaseImageProcessor): r""" Constructs a Vivit image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`): Size of the output image after resizing. The shortest edge of the image will be resized to `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overriden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop` parameter in the `preprocess` method. crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/127.5`): Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. offset (`bool`, *optional*, defaults to `True`): Whether to scale the image in both negative and positive directions. Can be overriden by the `offset` in the `preprocess` method. 
do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 127.5, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 256} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.offset = offset self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its shortest edge of length `s` while keeping the aspect ratio of the original image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" in size: output_size = get_resize_output_image_size( image, size["shortest_edge"], default_to_square=False, input_data_format=input_data_format ) elif "height" in size and "width" in size: output_size = (size["height"], size["width"]) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}") return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) # Copied from transformers.models.efficientnet.image_processing_efficientnet.EfficientNetImageProcessor.rescale def rescale( self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Rescale an image by a scale factor. If `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is 1/127.5, the image is rescaled between [-1, 1]. image = image * scale - 1 If `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1]. image = image * scale Args: image (`np.ndarray`): Image to rescale. scale (`int` or `float`): Scale to apply to the image. offset (`bool`, *optional*): Whether to scale the image in both negative and positive directions. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ rescaled_image = rescale( image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs ) if offset: rescaled_image = rescaled_image - 1 return rescaled_image def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True.") # All transformations expect numpy arrays. image = to_numpy_array(image) if is_scaled_image(image) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, offset=offset, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def preprocess( self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: videos (`ImageInput`): Video frames to preprocess. Expects a single or batch of video frames with pixel values ranging from 0 to 255. If passing in frames with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after applying resize. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_centre_crop`): Whether to centre crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after applying the centre crop. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between `[-1 - 1]` if `offset` is `True`, `[0, 1]` otherwise. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. offset (`bool`, *optional*, defaults to `self.offset`): Whether to scale the image in both negative and positive directions. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. 
Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the inferred channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor offset = offset if offset is not None else self.offset do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") if not valid_images(videos): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) videos = make_batched(videos) videos = [ [ self._preprocess_image( image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format, ) for img in video ] for video in videos ] data = {"pixel_values": videos} return BatchFeature(data=data, tensor_type=return_tensors)
transformers/src/transformers/models/vivit/image_processing_vivit.py/0
{ "file_path": "transformers/src/transformers/models/vivit/image_processing_vivit.py", "repo_id": "transformers", "token_count": 8101 }
372
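A short preprocessing sketch for the `VivitImageProcessor` defined above, not part of the original file. The random 32-frame clip and the printed shape are illustrative assumptions based on the default shortest-edge resize and 224x224 center crop.

import numpy as np

from transformers import VivitImageProcessor

# A stand-in clip of 32 RGB frames with pixel values in [0, 255].
video = [np.random.randint(0, 256, size=(360, 480, 3), dtype=np.uint8) for _ in range(32)]

# Defaults: resize shortest edge to 256, center crop 224x224, rescale by 1/127.5 with offset
# (values end up in [-1, 1]), then normalize with the ImageNet mean/std.
processor = VivitImageProcessor()
batch = processor(video, return_tensors="np")

print(batch["pixel_values"].shape)  # expected: (1, 32, 3, 224, 224) with the default crop size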
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech processor class for Wav2Vec2-BERT """ import warnings from ...processing_utils import ProcessorMixin from ..seamless_m4t.feature_extraction_seamless_m4t import SeamlessM4TFeatureExtractor from ..wav2vec2.tokenization_wav2vec2 import Wav2Vec2CTCTokenizer class Wav2Vec2BertProcessor(ProcessorMixin): r""" Constructs a Wav2Vec2-BERT processor which wraps a Wav2Vec2-BERT feature extractor and a Wav2Vec2 CTC tokenizer into a single processor. [`Wav2Vec2Processor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and [`PreTrainedTokenizer`]. See the docstring of [`~Wav2Vec2Processor.__call__`] and [`~Wav2Vec2Processor.decode`] for more information. Args: feature_extractor (`SeamlessM4TFeatureExtractor`): An instance of [`SeamlessM4TFeatureExtractor`]. The feature extractor is a required input. tokenizer ([`PreTrainedTokenizer`]): An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "SeamlessM4TFeatureExtractor" tokenizer_class = "AutoTokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): try: return super().from_pretrained(pretrained_model_name_or_path, **kwargs) except OSError: warnings.warn( f"Loading a tokenizer inside {cls.__name__} from a config that does not" " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: ", FutureWarning, ) feature_extractor = SeamlessM4TFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs) tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(feature_extractor=feature_extractor, tokenizer=tokenizer) def __call__(self, audio=None, text=None, **kwargs): """ Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `audio` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audio` is not `None` to pre-process the audio. To prepare the target sequences(s), this method forwards the `text` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.__call__`] if `text` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). audio (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): The audio or batch of audios to be prepared. 
                Each audio can be a NumPy array or a PyTorch tensor. In case of a NumPy array/PyTorch tensor, each
                audio should be of shape (C, T), where C is the number of channels, and T the sample length of the
                audio.
            kwargs (*optional*):
                Remaining dictionary of keyword arguments that will be passed to the feature extractor and/or the
                tokenizer.
        Returns:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_features** -- Audio input features to be fed to a model. Returned when `audio` is not `None`.
            - **attention_mask** -- List of indices specifying which timestamps should be attended to by the model
              when `audio` is not `None`. When only `text` is specified, returns the token attention mask.
            - **labels** -- List of token ids to be fed to a model. Returned when both `text` and `audio` are not
              `None`.
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None` and `audio`
              is `None`.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, input_features=None, labels=None, **kwargs):
        """
        If `input_features` is not `None`, this method forwards the `input_features` and `kwargs` arguments to
        SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.pad`] to pad the input features. If `labels` is
        not `None`, this method forwards the `labels` and `kwargs` arguments to PreTrainedTokenizer's
        [`~PreTrainedTokenizer.pad`] to pad the label(s). Please refer to the docstring of the above two methods for
        more information.
        """
        if input_features is None and labels is None:
            raise ValueError("You need to specify either an `input_features` or `labels` input to pad.")

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
        to the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)
transformers/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py", "repo_id": "transformers", "token_count": 2889 }
373
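A hedged usage sketch for `Wav2Vec2BertProcessor`. The checkpoint path below is a placeholder, not a real repo name; any repository that bundles a `SeamlessM4TFeatureExtractor` and a CTC tokenizer should behave the same way. The one-second random waveform is likewise only a stand-in.

import numpy as np

from transformers import Wav2Vec2BertProcessor

# Placeholder path: replace with a fine-tuned Wav2Vec2-BERT CTC checkpoint.
processor = Wav2Vec2BertProcessor.from_pretrained("path/to/wav2vec2-bert-ctc-checkpoint")

speech = np.random.randn(16000).astype(np.float32)  # 1 s of fake 16 kHz mono audio
batch = processor(audio=speech, text="hello world", sampling_rate=16000, return_tensors="pt")

print(batch["input_features"].shape)  # features produced by SeamlessM4TFeatureExtractor
print(batch["labels"])                # token ids produced by the tokenizer, usable as CTC targets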
#!/usr/bin/env python """Converts a Whisper model in OpenAI format to Hugging Face format.""" # Copyright 2022 The HuggingFace Inc. team and the OpenAI team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import io import json import os import tempfile import urllib import warnings from typing import Any, Optional, Tuple import torch from huggingface_hub.utils import insecure_hashlib from torch import nn from tqdm import tqdm from transformers import ( GenerationConfig, WhisperConfig, WhisperFeatureExtractor, WhisperForConditionalGeneration, WhisperProcessor, WhisperTokenizer, WhisperTokenizerFast, ) from transformers.models.whisper.tokenization_whisper import LANGUAGES, bytes_to_unicode from transformers.utils.import_utils import _is_package_available _MODELS = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", "large-v3": "https://openaipublic.azureedge.net/main/whisper/models/e5b1a55b89c1367dacf97e3e19bfd829a01529dbfdeefa8caeb59b3f1b81dadb/large-v3.pt", } _TOKENIZERS = { "multilingual": "https://raw.githubusercontent.com/openai/whisper/main/whisper/assets/multilingual.tiktoken", "english": "https://raw.githubusercontent.com/openai/whisper/main/whisper/assets/gpt2.tiktoken", } def _get_generation_config( is_multilingual: bool, num_languages: int = 100, openai_version: Optional[str] = None, ) -> GenerationConfig: """ Loads the appropriate generation config from HF repo """ if openai_version is not None: repo = f"openai/whisper-{openai_version}" elif not is_multilingual: repo = "openai/whisper-medium.en" elif num_languages < 100: repo = "openai/whisper-large-v2" else: repo = "openai/whisper-large-v3" gen_cfg 
= GenerationConfig.from_pretrained(repo) if openai_version is None: gen_cfg.alignment_heads = None warnings.warn( "Alignment heads have not been included in the generation config, since they are available " "only for the original OpenAI checkpoints." "If you want to use word-level timestamps with a custom version of Whisper," "see https://github.com/openai/whisper/blob/main/notebooks/Multilingual_ASR.ipynb" "for the example of how to produce word-level timestamps manually." ) return gen_cfg def remove_ignore_keys_(state_dict): ignore_keys = ["layers", "blocks"] for k in ignore_keys: state_dict.pop(k, None) WHISPER_MAPPING = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def rename_keys(s_dict): keys = list(s_dict.keys()) for key in keys: new_key = key for k, v in WHISPER_MAPPING.items(): if k in key: new_key = new_key.replace(k, v) print(f"{key} -> {new_key}") s_dict[new_key] = s_dict.pop(key) return s_dict def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer def _download(url: str, root: str) -> Any: os.makedirs(root, exist_ok=True) filename = os.path.basename(url) expected_sha256 = url.split("/")[-2] download_target = os.path.join(root, filename) if os.path.exists(download_target) and not os.path.isfile(download_target): raise RuntimeError(f"{download_target} exists and is not a regular file") if os.path.isfile(download_target): model_bytes = open(download_target, "rb").read() if insecure_hashlib.sha256(model_bytes).hexdigest() == expected_sha256: return torch.load(io.BytesIO(model_bytes)) else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: with tqdm( total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024 ) as loop: while True: buffer = source.read(8192) if not buffer: break output.write(buffer) loop.update(len(buffer)) model_bytes = open(download_target, "rb").read() if insecure_hashlib.sha256(model_bytes).hexdigest() != expected_sha256: raise RuntimeError( "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." ) return torch.load(io.BytesIO(model_bytes)) def convert_openai_whisper_to_tfms( checkpoint_path, pytorch_dump_folder_path ) -> Tuple[WhisperForConditionalGeneration, bool, int]: if ".pt" not in checkpoint_path: root = os.path.dirname(pytorch_dump_folder_path) or "." 
original_checkpoint = _download(_MODELS[checkpoint_path], root) openai_version = checkpoint_path else: original_checkpoint = torch.load(checkpoint_path, map_location="cpu") openai_version = None dimensions = original_checkpoint["dims"] state_dict = original_checkpoint["model_state_dict"] proj_out_weights = state_dict["decoder.token_embedding.weight"] remove_ignore_keys_(state_dict) rename_keys(state_dict) tie_embeds = True ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0] # a hacky way to properly set up the bos/eos/pad token ids in the model endoftext_id = 50257 if dimensions["n_vocab"] > 51865 else 50256 config = WhisperConfig( vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_head"], max_source_positions=dimensions["n_audio_ctx"], eos_token_id=endoftext_id, bos_token_id=endoftext_id, pad_token_id=endoftext_id, decoder_start_token_id=endoftext_id + 1, ) model = WhisperForConditionalGeneration(config) missing, unexpected = model.model.load_state_dict(state_dict, strict=False) if len(missing) > 0 and not set(missing) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," f" but all the following weights are missing {missing}" ) if tie_embeds: model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens) else: model.proj_out.weight.data = proj_out_weights # determine those parameters from a model checkpoint as Whisper repo does is_multilingual = model.config.vocab_size >= 51865 num_languages = model.config.vocab_size - 51765 - int(is_multilingual) model.generation_config = _get_generation_config( is_multilingual, num_languages, openai_version, ) return model, is_multilingual, num_languages # Adapted from https://github.com/openai/tiktoken/issues/60#issuecomment-1499977960 def _bpe(mergeable_ranks, token: bytes, max_rank=None) -> list[bytes]: parts = [bytes([b]) for b in token] while True: min_idx = None min_rank = None for i, pair in enumerate(zip(parts[:-1], parts[1:])): rank = mergeable_ranks.get(pair[0] + pair[1]) if rank is not None and (min_rank is None or rank < min_rank): min_idx = i min_rank = rank if min_rank is None or (max_rank is not None and min_rank >= max_rank): break assert min_idx is not None parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2 :] return parts def convert_tiktoken_bpe_to_hf(tiktoken_url: str): bpe_ranks = load_tiktoken_bpe(tiktoken_url) byte_encoder = bytes_to_unicode() def token_bytes_to_string(b): return "".join([byte_encoder[ord(char)] for char in b.decode("latin-1")]) merges = [] vocab = {} for token, rank in bpe_ranks.items(): vocab[token_bytes_to_string(token)] = rank if len(token) == 1: continue merged = tuple(_bpe(bpe_ranks, token, max_rank=rank)) if len(merged) == 2: # account for empty token merges.append(" ".join(map(token_bytes_to_string, merged))) return vocab, merges def convert_tiktoken_to_hf( multilingual: bool = True, num_languages: int = 100, time_precision=0.02 ) -> WhisperTokenizer: # requires whisper, unless we use the path to the tiktoken file tiktoken_tokenizer_path = _TOKENIZERS["multilingual" if multilingual 
    else "english"]
    start_of_transcript = ["<|endoftext|>", "<|startoftranscript|>"]
    control_tokens = [
        "<|translate|>",
        "<|transcribe|>",
        "<|startoflm|>",
        "<|startofprev|>",
        "<|nospeech|>",
        "<|notimestamps|>",
    ]
    # these are special tokens, not normalized
    language_tokens = [f"<|{k}|>" for k in list(LANGUAGES)[:num_languages]]
    # These are not special but normalized
    timestamp_tokens = [("<|%.2f|>" % (i * time_precision)) for i in range(1500 + 1)]

    vocab, merges = convert_tiktoken_bpe_to_hf(tiktoken_tokenizer_path)

    with tempfile.TemporaryDirectory() as tmpdirname:
        vocab_file = f"{tmpdirname}/vocab.json"
        merge_file = f"{tmpdirname}/merges.txt"
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens in merges:
                writer.write(bpe_tokens + "\n")

        hf_tokenizer = WhisperTokenizer(vocab_file, merge_file)

    hf_tokenizer.add_tokens(start_of_transcript + language_tokens + control_tokens, special_tokens=True)
    hf_tokenizer.add_tokens(timestamp_tokens, special_tokens=False)
    return hf_tokenizer


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--convert_preprocessor",
        type=bool,
        default=False,
        help="Whether or not the preprocessor (tokenizer + feature extractor) should be converted along with the model.",
    )
    args = parser.parse_args()

    model, is_multilingual, num_languages = convert_openai_whisper_to_tfms(
        args.checkpoint_path, args.pytorch_dump_folder_path
    )

    if args.convert_preprocessor:
        try:
            if not _is_package_available("tiktoken"):
                raise ModuleNotFoundError(
                    "`tiktoken` is not installed, use `pip install tiktoken` to convert the tokenizer"
                )
        except Exception as e:
            # Report the missing dependency instead of silently skipping the preprocessor conversion.
            print(e)
        else:
            from tiktoken.load import load_tiktoken_bpe

            tokenizer = convert_tiktoken_to_hf(is_multilingual, num_languages)
            feature_extractor = WhisperFeatureExtractor(
                feature_size=model.config.num_mel_bins,
                # the rest of default parameters are the same as hardcoded in openai/whisper
            )
            processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
            processor.save_pretrained(args.pytorch_dump_folder_path)

            # save fast tokenizer as well
            fast_tokenizer = WhisperTokenizerFast.from_pretrained(args.pytorch_dump_folder_path)
            fast_tokenizer.save_pretrained(args.pytorch_dump_folder_path, legacy_format=False)

    model.save_pretrained(args.pytorch_dump_folder_path)
transformers/src/transformers/models/whisper/convert_openai_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/whisper/convert_openai_to_hf.py", "repo_id": "transformers", "token_count": 6393 }
374
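Assuming the conversion script above was run with `--convert_preprocessor True` and a dump folder such as `./whisper-tiny-hf` (both assumptions, not values from the file), the converted checkpoint can be loaded back for a quick transcription smoke test:

import numpy as np
import torch

from transformers import WhisperForConditionalGeneration, WhisperProcessor

dump_dir = "./whisper-tiny-hf"  # folder passed as --pytorch_dump_folder_path

processor = WhisperProcessor.from_pretrained(dump_dir)
model = WhisperForConditionalGeneration.from_pretrained(dump_dir)

# 2 s of silence as a stand-in for real 16 kHz audio.
audio = np.zeros(32000, dtype=np.float32)
inputs = processor(audio, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    generated_ids = model.generate(inputs.input_features, max_new_tokens=32)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))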
# coding=utf-8
# Copyright The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XGLM model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`XGLMModel`]. It is used to instantiate an XGLM
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the XGLM
    [facebook/xglm-564M](https://huggingface.co/facebook/xglm-564M) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 256008):
            Vocabulary size of the XGLM model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`XGLMModel`] or [`FlaxXGLMModel`].
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        d_model (`int`, *optional*, defaults to 1024):
            Dimension of the layers and the pooler layer.
        ffn_dim (`int`, *optional*, defaults to 4096):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        num_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer decoder.
        attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, decoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Scale embeddings by dividing by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Example: ```python >>> from transformers import XGLMModel, XGLMConfig >>> # Initializing a XGLM facebook/xglm-564M style configuration >>> configuration = XGLMConfig() >>> # Initializing a model from the facebook/xglm-564M style configuration >>> model = XGLMModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "xglm" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "attention_heads", "hidden_size": "d_model", "num_hidden_layers": "num_layers", } def __init__( self, vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096, num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02, scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.ffn_dim = ffn_dim self.num_layers = num_layers self.attention_heads = attention_heads self.activation_function = activation_function self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.layerdrop = layerdrop self.init_std = init_std self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.use_cache = use_cache super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
transformers/src/transformers/models/xglm/configuration_xglm.py/0
{ "file_path": "transformers/src/transformers/models/xglm/configuration_xglm.py", "repo_id": "transformers", "token_count": 2346 }
375
# coding=utf-8 # Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer" ), } } PRETRAINED_INIT_CONFIGURATION = { "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False}, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/xprophetnet-large-wiki100-cased": 512, } def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab class XLMProphetNetTokenizer(PreTrainedTokenizer): """ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"[SEP]"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"[SEP]"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. 
mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice) using the forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4} for i in range(10): tok = f"[unused{i}]" self.fairseq_tokens_to_ids[tok] = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab self.fairseq_offset = 12 self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} # TODO ArthurZ fairseq_ids_to_tokens should be removed super().__init__( bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return ([0] * len(token_ids_0)) + [1] return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLMProphetNet does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. 
""" sep = [self.sep_token_id] if token_ids_1 is None: return len(token_ids_0 + sep) * [0] return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.sp_model) + self.fairseq_offset def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> str: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A XLMProphetNet sequence has the following format: - single sequence: `X [SEP]` - pair of sequences: `A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return token_ids_0 + [self.sep_token_id] sep = [self.sep_token_id] return token_ids_0 + sep + token_ids_1 + sep
transformers/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py/0
{ "file_path": "transformers/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py", "repo_id": "transformers", "token_count": 5985 }
376
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch XLNet model. """ import warnings from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_xlnet import XLNetConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "xlnet-base-cased" _CONFIG_FOR_DOC = "XLNetConfig" XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "xlnet-base-cased", "xlnet-large-cased", # See all XLNet models at https://huggingface.co/models?filter=xlnet ] def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. I use a map to keep the PyTorch model as identical to the original PyTorch model as possible. 
""" tf_to_pt_map = {} if hasattr(model, "transformer"): if hasattr(model, "lm_loss"): # We will load also the output bias tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights: # We will load also the sequence summary tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias if ( hasattr(model, "logits_proj") and config.finetuning_task is not None and f"model/regression_{config.finetuning_task}/logit/kernel" in tf_weights ): tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/kernel"] = model.logits_proj.weight tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/bias"] = model.logits_proj.bias # Now load the rest of the transformer model = model.transformer # Embeddings and output tf_to_pt_map.update( { "model/transformer/word_embedding/lookup_table": model.word_embedding.weight, "model/transformer/mask_emb/mask_emb": model.mask_emb, } ) # Transformer blocks for i, b in enumerate(model.layer): layer_str = f"model/transformer/layer_{i}/" tf_to_pt_map.update( { layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight, layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias, layer_str + "rel_attn/o/kernel": b.rel_attn.o, layer_str + "rel_attn/q/kernel": b.rel_attn.q, layer_str + "rel_attn/k/kernel": b.rel_attn.k, layer_str + "rel_attn/r/kernel": b.rel_attn.r, layer_str + "rel_attn/v/kernel": b.rel_attn.v, layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight, layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias, layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight, layer_str + "ff/layer_1/bias": b.ff.layer_1.bias, layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight, layer_str + "ff/layer_2/bias": b.ff.layer_2.bias, } ) # Relative positioning biases if config.untie_r: r_r_list = [] r_w_list = [] r_s_list = [] seg_embed_list = [] for b in model.layer: r_r_list.append(b.rel_attn.r_r_bias) r_w_list.append(b.rel_attn.r_w_bias) r_s_list.append(b.rel_attn.r_s_bias) seg_embed_list.append(b.rel_attn.seg_embed) else: r_r_list = [model.r_r_bias] r_w_list = [model.r_w_bias] r_s_list = [model.r_s_bias] seg_embed_list = [model.seg_embed] tf_to_pt_map.update( { "model/transformer/r_r_bias": r_r_list, "model/transformer/r_w_bias": r_w_list, "model/transformer/r_s_bias": r_s_list, "model/transformer/seg_embed": seg_embed_list, } ) return tf_to_pt_map def load_tf_weights_in_xlnet(model, config, tf_path): """Load tf checkpoints in a pytorch model""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise # Load weights from TF model init_vars = tf.train.list_variables(tf_path) tf_weights = {} for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) tf_weights[name] = array # Build TF to PyTorch weights loading map tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights) for name, pointer in tf_to_pt_map.items(): logger.info(f"Importing {name}") if name not in tf_weights: logger.info(f"{name} not in tf pre-trained weights, skipping") continue array = tf_weights[name] # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name): logger.info("Transposing") array = np.transpose(array) if isinstance(pointer, list): # Here we will split the TF weights assert ( len(pointer) == array.shape[0] ), f"Pointer length {len(pointer)} and array length {array.shape[0]} mismatched" for i, p_i in enumerate(pointer): arr_i = array[i, ...] try: assert ( p_i.shape == arr_i.shape ), f"Pointer shape {p_i.shape} and array shape {arr_i.shape} mismatched" except AssertionError as e: e.args += (p_i.shape, arr_i.shape) raise logger.info(f"Initialize PyTorch weight {name} for layer {i}") p_i.data = torch.from_numpy(arr_i) else: try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + "/Adam", None) tf_weights.pop(name + "/Adam_1", None) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}") return model class XLNetRelativeAttention(nn.Module): def __init__(self, config): super().__init__() if config.d_model % config.n_head != 0: raise ValueError( f"The hidden size ({config.d_model}) is not a multiple of the number of attention " f"heads ({config.n_head}" ) self.n_head = config.n_head self.d_head = config.d_head self.d_model = config.d_model self.scale = 1 / (config.d_head**0.5) self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head)) self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.dropout) def prune_heads(self, heads): raise NotImplementedError @staticmethod def rel_shift(x, klen=-1): """perform relative shift to form the relative attention score.""" x_size = x.shape x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3]) x = x[1:, ...] 
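        # Note (added comment): this reshape / drop-first-row / reshape sequence is the
        # Transformer-XL style "relative shift"; together with the index_select below it
        # realigns scores indexed by relative position onto the key axis without an explicit
        # gather. `rel_shift_bnij` performs the same operation in the [b, n, i, j] layout.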
x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3]) # x = x[:, 0:klen, :, :] x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long)) return x @staticmethod def rel_shift_bnij(x, klen=-1): x_size = x.shape x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2]) x = x[:, :, 1:, :] x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1) # Note: the tensor-slice form was faster in my testing than torch.index_select # However, tracing doesn't like the nature of the slice, and if klen changes # during the run then it'll fail, whereas index_select will be fine. x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long)) # x = x[:, :, :, :klen] return x def rel_attn_core( self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None, output_attentions=False, ): """Core relative positional attention operations.""" # content based attention score ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h) # position based attention score bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r) bd = self.rel_shift_bnij(bd, klen=ac.shape[3]) # segment based attention score if seg_mat is None: ef = 0 else: ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed) ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef) # merge attention scores and perform masking attn_score = (ac + bd + ef) * self.scale if attn_mask is not None: # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask if attn_mask.dtype == torch.float16: attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask) else: attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask) # attention probability attn_prob = nn.functional.softmax(attn_score, dim=3) attn_prob = self.dropout(attn_prob) # Mask heads if we want to if head_mask is not None: attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask) # attention output attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h) if output_attentions: return attn_vec, torch.einsum("bnij->ijbn", attn_prob) return attn_vec def post_attention(self, h, attn_vec, residual=True): """Post-attention processing.""" # post-attention projection (back to `d_model`) attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o) attn_out = self.dropout(attn_out) if residual: attn_out = attn_out + h output = self.layer_norm(attn_out) return output def forward( self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None, output_attentions=False, ): if g is not None: # Two-stream attention with relative positional encoding. 
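            # Note (added comment): in two-stream attention the content stream (h) sees both
            # the content and the position of the tokens it attends to, while the query
            # stream (g) only knows the position of the token being predicted (its content is
            # replaced by `mask_emb` upstream), so the target token cannot leak into its own
            # prediction during permutation language modeling.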
# content based attention score if mems is not None and mems.dim() > 1: cat = torch.cat([mems, h], dim=0) else: cat = h # content-based key head k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k) # content-based value head v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v) # position-based key head k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r) # h-stream # content-stream query head q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q) # core attention ops attn_vec_h = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_h, attn_prob_h = attn_vec_h # post processing output_h = self.post_attention(h, attn_vec_h) # g-stream # query-stream query head q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q) # core attention ops if target_mapping is not None: q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping) attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping) else: attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g # post processing output_g = self.post_attention(g, attn_vec_g) if output_attentions: attn_prob = attn_prob_h, attn_prob_g else: # Multi-head attention with relative positional encoding if mems is not None and mems.dim() > 1: cat = torch.cat([mems, h], dim=0) else: cat = h # content heads q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q) k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k) v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v) # positional heads # type casting for fp16 support k_head_r = torch.einsum("ibh,hnd->ibnd", r.type(self.r.dtype), self.r) # core attention ops attn_vec = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec, attn_prob = attn_vec # post processing output_h = self.post_attention(h, attn_vec) output_g = None outputs = (output_h, output_g) if output_attentions: outputs = outputs + (attn_prob,) return outputs class XLNetFeedForward(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.layer_1 = nn.Linear(config.d_model, config.d_inner) self.layer_2 = nn.Linear(config.d_inner, config.d_model) self.dropout = nn.Dropout(config.dropout) if isinstance(config.ff_activation, str): self.activation_function = ACT2FN[config.ff_activation] else: self.activation_function = config.ff_activation def forward(self, inp): output = inp output = self.layer_1(output) output = self.activation_function(output) output = self.dropout(output) output = self.layer_2(output) output = self.dropout(output) output = self.layer_norm(output + inp) return output class XLNetLayer(nn.Module): def __init__(self, config): super().__init__() self.rel_attn = XLNetRelativeAttention(config) self.ff = XLNetFeedForward(config) self.dropout = nn.Dropout(config.dropout) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 def forward( self, output_h, output_g, 
attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None, output_attentions=False, ): outputs = self.rel_attn( output_h, output_g, attn_mask_h, attn_mask_g, r, seg_mat, mems=mems, target_mapping=target_mapping, head_mask=head_mask, output_attentions=output_attentions, ) output_h, output_g = outputs[:2] if output_g is not None: output_g = apply_chunking_to_forward( self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_g ) output_h = apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_h) outputs = (output_h, output_g) + outputs[2:] # Add again attentions if there are there return outputs def ff_chunk(self, output_x): output_x = self.ff(output_x) return output_x class XLNetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = XLNetConfig load_tf_weights = load_tf_weights_in_xlnet base_model_prefix = "transformer" def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, XLNetRelativeAttention): for param in [ module.q, module.k, module.v, module.o, module.r, module.r_r_bias, module.r_s_bias, module.r_w_bias, module.seg_embed, ]: param.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, XLNetModel): module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range) @dataclass class XLNetModelOutput(ModelOutput): """ Output type of [`XLNetModel`]. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" last_hidden_state: torch.FloatTensor mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetLMHeadModelOutput(ModelOutput): """ Output type of [`XLNetLMHeadModel`]. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided) Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForSequenceClassificationOutput(ModelOutput): """ Output type of [`XLNetForSequenceClassification`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForTokenClassificationOutput(ModelOutput): """ Output type of [`XLNetForTokenClassificationOutput`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForMultipleChoiceOutput(ModelOutput): """ Output type of [`XLNetForMultipleChoice`]. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForQuestionAnsweringSimpleOutput(ModelOutput): """ Output type of [`XLNetForQuestionAnsweringSimple`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`): Span-end scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForQuestionAnsweringOutput(ModelOutput): """ Output type of [`XLNetForQuestionAnswering`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided): Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top config.start_n_top start token possibilities (beam-search). start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top config.start_n_top start token possibilities (beam-search). end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). 
end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the `is_impossible` label of the answers. mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None XLNET_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`XLNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ XLNET_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (see `mems` output below) . 
Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. `use_mems` has to be set to `True` to make use of `mems`. perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*): Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`: - if `perm_mask[k, i, j] = 0`, i attend to j in batch k; - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k. If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*): Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation). token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) input_mask (`torch.FloatTensor` of shape `{0}`, *optional*): Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in `[0, 1]`: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. You can only uses one of `input_mask` and `attention_mask`. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.", XLNET_START_DOCSTRING, ) class XLNetModel(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.mem_len = config.mem_len self.reuse_len = config.reuse_len self.d_model = config.d_model self.same_length = config.same_length self.attn_type = config.attn_type self.bi_data = config.bi_data self.clamp_len = config.clamp_len self.n_layer = config.n_layer self.word_embedding = nn.Embedding(config.vocab_size, config.d_model) self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model)) self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)]) self.dropout = nn.Dropout(config.dropout) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.word_embedding def set_input_embeddings(self, new_embeddings): self.word_embedding = new_embeddings def _prune_heads(self, heads_to_prune): raise NotImplementedError def create_mask(self, qlen, mlen): """ Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked. Args: qlen: Sequence length mlen: Mask length :: same_length=False: same_length=True: <mlen > < qlen > <mlen > < qlen > ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1] qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1] [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1] v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0] """ mask = torch.ones((qlen, qlen + mlen), device=self.device) if self.same_length: mask_lo = mask[:, :qlen].tril(-1) mask.triu_(mlen + 1) mask[:, :qlen] += mask_lo else: mask.triu_(mlen + 1) return mask def cache_mem(self, curr_out, prev_mem): # cache hidden states into memory. if self.reuse_len is not None and self.reuse_len > 0: curr_out = curr_out[: self.reuse_len] if self.mem_len is None or self.mem_len == 0: # If `use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time # and returns all of the past and current hidden states. cutoff = 0 else: # If `use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden # states. This is the preferred setting for training and long-form generation. cutoff = -self.mem_len if prev_mem is None: # if `use_mems` is active and `mem_len` is defined, the model new_mem = curr_out[cutoff:] else: new_mem = torch.cat([prev_mem, curr_out], dim=0)[cutoff:] return new_mem.detach() @staticmethod def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq) pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1) pos_emb = pos_emb[:, None, :] if bsz is not None: pos_emb = pos_emb.expand(-1, bsz, -1) return pos_emb def relative_positional_encoding(self, qlen, klen, bsz=None): # create relative positional encoding. 
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.int64).float() inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model)) if self.attn_type == "bi": # beg, end = klen - 1, -qlen beg, end = klen, -qlen elif self.attn_type == "uni": # beg, end = klen - 1, -1 beg, end = klen, -1 else: raise ValueError(f"Unknown `attn_type` {self.attn_type}.") if self.bi_data: fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float() bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.int64).float() if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) if bsz is not None: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2) else: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq) pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1) else: fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float() if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz) return pos_emb @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete after depreciation warning is removed ) -> Union[Tuple, XLNetModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if "use_cache" in kwargs: warnings.warn( "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems`" " instead.", FutureWarning, ) use_mems = kwargs["use_cache"] if self.training: use_mems = use_mems if use_mems is not None else self.config.use_mems_train else: use_mems = use_mems if use_mems is not None else self.config.use_mems_eval # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_ids = input_ids.transpose(0, 1).contiguous() qlen, bsz = input_ids.shape[0], input_ids.shape[1] elif inputs_embeds is not None: inputs_embeds = inputs_embeds.transpose(0, 1).contiguous() qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1] else: raise ValueError("You have to specify either 
input_ids or inputs_embeds") token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0 klen = mlen + qlen dtype_float = self.dtype device = self.device # Attention mask # causal attention mask if self.attn_type == "uni": attn_mask = self.create_mask(qlen, mlen) attn_mask = attn_mask[:, :, None, None] elif self.attn_type == "bi": attn_mask = None else: raise ValueError(f"Unsupported attention type: {self.attn_type}") # data mask: input mask & perm mask assert input_mask is None or attention_mask is None, "You can only use one of input_mask (uses 1 for padding) " "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one." if input_mask is None and attention_mask is not None: input_mask = 1.0 - attention_mask if input_mask is not None and perm_mask is not None: data_mask = input_mask[None] + perm_mask elif input_mask is not None and perm_mask is None: data_mask = input_mask[None] elif input_mask is None and perm_mask is not None: data_mask = perm_mask else: data_mask = None if data_mask is not None: # all mems can be attended to if mlen > 0: mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask) data_mask = torch.cat([mems_mask, data_mask], dim=1) if attn_mask is None: attn_mask = data_mask[:, :, :, None] else: attn_mask += data_mask[:, :, :, None] if attn_mask is not None: attn_mask = (attn_mask > 0).to(dtype_float) if attn_mask is not None: non_tgt_mask = -torch.eye(qlen).to(attn_mask) if mlen > 0: non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1) non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask) else: non_tgt_mask = None # Word embeddings and prepare h & g hidden states if inputs_embeds is not None: word_emb_k = inputs_embeds else: word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k) if target_mapping is not None: word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1) # else: # We removed the inp_q input which was same as target mapping # inp_q_ext = inp_q[:, :, None] # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k output_g = self.dropout(word_emb_q) else: output_g = None # Segment embedding if token_type_ids is not None: # Convert `token_type_ids` to one-hot `seg_mat` if mlen > 0: mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device) cat_ids = torch.cat([mem_pad, token_type_ids], dim=0) else: cat_ids = token_type_ids # `1` indicates not in the same segment [qlen x klen x bsz] seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long() seg_mat = nn.functional.one_hot(seg_mat, num_classes=2).to(dtype_float) else: seg_mat = None # Positional encoding pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz) pos_emb = pos_emb.to(output_h.device) pos_emb = self.dropout(pos_emb) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each 
layer) # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0) head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1) head_mask = head_mask.to( dtype=next(self.parameters()).dtype ) # switch to float if need + fp16 compatibility else: head_mask = [None] * self.n_layer new_mems = () if mems is None: mems = [None] * len(self.layer) attentions = [] if output_attentions else None hidden_states = [] if output_hidden_states else None for i, layer_module in enumerate(self.layer): if use_mems: # cache new mems new_mems = new_mems + (self.cache_mem(output_h, mems[i]),) if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) outputs = layer_module( output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask, r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping, head_mask=head_mask[i], output_attentions=output_attentions, ) output_h, output_g = outputs[:2] if output_attentions: attentions.append(outputs[2]) # Add last hidden state if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) output = self.dropout(output_g if output_g is not None else output_h) # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method) output = output.permute(1, 0, 2).contiguous() if not use_mems: new_mems = None if output_hidden_states: if output_g is not None: hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs) else: hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states) if output_attentions: if target_mapping is not None: # when target_mapping is provided, there are 2-tuple of attentions attentions = tuple( tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions ) else: attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions) if not return_dict: return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None) return XLNetModelOutput( last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions ) @add_start_docstrings( """ XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings). """, XLNET_START_DOCSTRING, ) class XLNetLMHeadModel(XLNetPreTrainedModel): _tied_weights_keys = ["lm_loss.weight"] def __init__(self, config): super().__init__(config) self.attn_type = config.attn_type self.same_length = config.same_length self.transformer = XLNetModel(config) self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_loss def set_output_embeddings(self, new_embeddings): self.lm_loss = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_mems=None, **kwargs): # Add dummy token at the end (no attention on this one) effective_batch_size = input_ids.shape[0] dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device) # At every pass, the attention values for the new token and the two last generated tokens # are computed, the rest is reloaded from the `past` cache. 
A purely auto-regressive model would have # offset = 1; offset = 2 seems to have slightly better computation. offset = 2 if past_key_values: input_ids = torch.cat([input_ids[:, -offset:], dummy_token], dim=1) else: input_ids = torch.cat([input_ids, dummy_token], dim=1) # Build permutation mask so that previous tokens don't see last token sequence_length = input_ids.shape[1] perm_mask = torch.zeros( (effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device ) perm_mask[:, :, -1] = 1.0 # We'll only predict the last token target_mapping = torch.zeros( (effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device ) target_mapping[:, 0, -1] = 1.0 inputs = { "input_ids": input_ids, "perm_mask": perm_mask, "target_mapping": target_mapping, "use_mems": use_mems, } # if past is defined in model kwargs then use it for faster decoding if past_key_values: inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past_key_values) return inputs @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLNetLMHeadModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetLMHeadModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, num_predict)`, *optional*): Labels for masked language modeling. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. The labels should correspond to the masked input words that should be predicted and depends on `target_mapping`. Note in order to perform standard auto-regressive language modeling a *<mask>* token has to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below) Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored, the loss is only computed for labels in `[0, ..., config.vocab_size]` Return: Examples: ```python >>> from transformers import AutoTokenizer, XLNetLMHeadModel >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("xlnet-large-cased") >>> model = XLNetLMHeadModel.from_pretrained("xlnet-large-cased") >>> # We show how to setup inputs to predict a next token using a bi-directional context. >>> input_ids = torch.tensor( ... tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False) ... ).unsqueeze( ... 0 ... ) # We will predict the masked token >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float) >>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token >>> target_mapping = torch.zeros( ... (1, 1, input_ids.shape[1]), dtype=torch.float ... ) # Shape [1, 1, seq_length] => let's predict one token >>> target_mapping[ ... 0, 0, -1 ... 
] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping) >>> next_token_logits = outputs[ ... 0 ... ] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] >>> # The same way can the XLNetLMHeadModel be used to be trained by standard auto-regressive language modeling. >>> input_ids = torch.tensor( ... tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False) ... ).unsqueeze( ... 0 ... ) # We will predict the masked token >>> labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0) >>> assert labels.shape[0] == 1, "only one word will be predicted" >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float) >>> perm_mask[ ... :, :, -1 ... ] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training >>> target_mapping = torch.zeros( ... (1, 1, input_ids.shape[1]), dtype=torch.float ... ) # Shape [1, 1, seq_length] => let's predict one token >>> target_mapping[ ... 0, 0, -1 ... ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels) >>> loss = outputs.loss >>> next_token_logits = ( ... outputs.logits ... ) # Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) logits = self.lm_loss(transformer_outputs[0]) loss = None if labels is not None: # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetLMHeadModelOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]: """ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every generation step. """ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems] @add_start_docstrings( """ XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", XLNET_START_DOCSTRING, ) class XLNetForSequenceClassification(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.transformer = XLNetModel(config) self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForSequenceClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForSequenceClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) output = transformer_outputs[0] output = self.sequence_summary(output) logits = self.logits_proj(output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForSequenceClassificationOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a token classification head on top (a linear layer on top of the 
hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
    """,
    XLNET_START_DOCSTRING,
)
class XLNetForTokenClassification(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLNetModel(config)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=XLNetForTokenClassificationOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        mems: Optional[torch.Tensor] = None,
        perm_mask: Optional[torch.Tensor] = None,
        target_mapping: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        input_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_mems: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,  # delete when `use_cache` is removed in XLNetModel
    ) -> Union[Tuple, XLNetForTokenClassificationOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return XLNetForTokenClassificationOutput(
            loss=loss,
            logits=logits,
            mems=outputs.mems,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    XLNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RACE/SWAG tasks.
""", XLNET_START_DOCSTRING, ) class XLNetForMultipleChoice(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = XLNetModel(config) self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForMultipleChoiceOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForMultipleChoiceOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) transformer_outputs = self.transformer( flat_input_ids, token_type_ids=flat_token_type_ids, input_mask=flat_input_mask, attention_mask=flat_attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) output = transformer_outputs[0] output = self.sequence_summary(output) logits = self.logits_proj(output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels.view(-1)) if not return_dict: output = (reshaped_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForMultipleChoiceOutput( loss=loss, logits=reshaped_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", XLNET_START_DOCSTRING, ) class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = XLNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForQuestionAnsweringSimpleOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForQuestionAnsweringSimpleOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return XLNetForQuestionAnsweringSimpleOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, mems=outputs.mems, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", XLNET_START_DOCSTRING, ) class XLNetForQuestionAnswering(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.transformer = XLNetModel(config) self.start_logits = PoolerStartLogits(config) self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLNetForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, is_impossible: Optional[torch.Tensor] = None, cls_index: Optional[torch.Tensor] = None, p_mask: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForQuestionAnsweringOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels whether a question has an answer or no answer (SQuAD 2.0) cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the classification token to use as input for computing plausibility of the answer. p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be masked. 0.0 mean token is not masked. Returns: Example: ```python >>> from transformers import AutoTokenizer, XLNetForQuestionAnswering >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased") >>> model = XLNetForQuestionAnswering.from_pretrained("xlnet-base-cased") >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze( ... 0 ... 
) # Batch size 1 >>> start_positions = torch.tensor([1]) >>> end_positions = torch.tensor([3]) >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) >>> loss = outputs.loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) hidden_states = transformer_outputs[0] start_logits = self.start_logits(hidden_states, p_mask=p_mask) outputs = transformer_outputs[1:] # Keep mems, hidden states, attentions if there are in it if start_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: # Predict answerability from the representation of CLS and START cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss total_loss += cls_loss * 0.5 if not return_dict: return (total_loss,) + transformer_outputs[1:] else: return XLNetForQuestionAnsweringOutput( loss=total_loss, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: # during inference, compute the end logits based on beam search bsz, slen, hsz = hidden_states.size() start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen) start_top_log_probs, start_top_index = torch.topk( start_log_probs, self.start_n_top, dim=-1 ) # shape (bsz, start_n_top) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( start_states ) # shape (bsz, slen, start_n_top, hsz) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) end_top_log_probs, end_top_index = torch.topk( end_log_probs, self.end_n_top, dim=1 ) # shape (bsz, end_n_top, start_n_top) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum( "blh,bl->bh", hidden_states, start_log_probs ) # get the 
representation of START as weighted sum of hidden states cls_logits = self.answer_class( hidden_states, start_states=start_states, cls_index=cls_index ) # Shape (batch size,): one single `cls_logits` for each sample if not return_dict: outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) return outputs + transformer_outputs[1:] else: return XLNetForQuestionAnsweringOutput( start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
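

# Illustrative sketch, not part of the original modeling file: how the beam-search outputs returned by
# `XLNetForQuestionAnswering` at inference time (i.e. when no `start_positions`/`end_positions` are given)
# can be inspected. The helper name below is hypothetical; the checkpoint and example sentence match the
# docstring example above.
def _xlnet_qa_beam_search_shapes_sketch():
    import torch
    from transformers import AutoTokenizer, XLNetForQuestionAnswering

    tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
    model = XLNetForQuestionAnswering.from_pretrained("xlnet-base-cased")

    inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Log-probabilities and indices of the top start positions: (batch_size, config.start_n_top)
    print(outputs.start_top_log_probs.shape, outputs.start_top_index.shape)
    # Top end positions, enumerated per start candidate: (batch_size, config.start_n_top * config.end_n_top)
    print(outputs.end_top_log_probs.shape, outputs.end_top_index.shape)
    # Answerability logit produced by the pooled answer classifier: (batch_size,)
    print(outputs.cls_logits.shape)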
transformers/src/transformers/models/xlnet/modeling_xlnet.py/0
{ "file_path": "transformers/src/transformers/models/xlnet/modeling_xlnet.py", "repo_id": "transformers", "token_count": 41780 }
377
# coding=utf-8
# Copyright 2022 University of Wisconsin-Madison and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch YOSO model."""

import math
from pathlib import Path
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_yoso import YosoConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "uw-madison/yoso-4096"
_CONFIG_FOR_DOC = "YosoConfig"

YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "uw-madison/yoso-4096",
    # See all YOSO models at https://huggingface.co/models?filter=yoso
]


def load_cuda_kernels():
    global lsh_cumulation
    try:
        from torch.utils.cpp_extension import load

        def append_root(files):
            src_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "yoso"
            return [src_folder / file for file in files]

        src_files = append_root(
            ["fast_lsh_cumulation_torch.cpp", "fast_lsh_cumulation.cu", "fast_lsh_cumulation_cuda.cu"]
        )

        load("fast_lsh_cumulation", src_files, verbose=True)

        import fast_lsh_cumulation as lsh_cumulation

        return True
    except Exception:
        lsh_cumulation = None
        return False


def to_contiguous(input_tensors):
    if isinstance(input_tensors, list):
        out = []
        for tensor in input_tensors:
            if not tensor.is_contiguous():
                tensor = tensor.contiguous()
            out.append(tensor)
        return out
    else:
        if not input_tensors.is_contiguous():
            input_tensors = input_tensors.contiguous()
        return input_tensors


def normalize(input_tensors):
    if isinstance(input_tensors, list):
        out = []
        for tensor in input_tensors:
            out.append(nn.functional.normalize(tensor, p=2, dim=-1))
        return out
    else:
        return nn.functional.normalize(input_tensors, p=2, dim=-1)


def hashing(query, key, num_hash, hash_len):
    if len(query.size()) != 3:
        raise ValueError("Query has incorrect size.")
    if len(key.size()) != 3:
        raise ValueError("Key has incorrect size.")

    rmat = torch.randn(query.size(0), query.size(2), num_hash * hash_len, device=query.device)
    raise_pow = 2 ** torch.arange(hash_len, device=query.device)

    query_projection = torch.matmul(query, rmat).reshape(query.size(0), query.size(1), num_hash, hash_len)
    key_projection = torch.matmul(key, rmat).reshape(key.size(0), key.size(1), num_hash, hash_len)
    query_binary = (query_projection > 0).int()
    key_binary = (key_projection > 0).int()
    # turn the binary projections into separate integer hash codes for queries and keys
    query_hash = torch.sum(query_binary * raise_pow, dim=-1)
    key_hash = torch.sum(key_binary * raise_pow, dim=-1)

    return query_hash.int(), key_hash.int()


class 
YosoCumulation(torch.autograd.Function): @staticmethod def forward(ctx, query_mask, key_mask, query, key, value, config): hash_code_len = config["hash_code_len"] expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :] cumulation_value = torch.matmul(expectation, value) ctx.save_for_backward(query_mask, key_mask, expectation, query, key, value) ctx.config = config return cumulation_value @staticmethod def backward(ctx, grad): grad = to_contiguous(grad) query_mask, key_mask, expectation, query, key, value = ctx.saved_tensors config = ctx.config hash_code_len = config["hash_code_len"] weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key) grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query) grad_value = torch.matmul(expectation.transpose(-1, -2), grad) return None, None, grad_query, grad_key, grad_value, None class YosoLSHCumulation(torch.autograd.Function): @staticmethod def forward(ctx, query_mask, key_mask, query, key, value, config): if query_mask.size(0) != key_mask.size(0): raise ValueError("Query mask and Key mask differ in sizes in dimension 0") if query_mask.size(0) != query.size(0): raise ValueError("Query mask and Query differ in sizes in dimension 0") if query_mask.size(0) != key.size(0): raise ValueError("Query mask and Key differ in sizes in dimension 0") if query_mask.size(0) != value.size(0): raise ValueError("Query mask and Value mask differ in sizes in dimension 0") if key.size(1) != value.size(1): raise ValueError("Key and Value differ in sizes in dimension 1") if query.size(2) != key.size(2): raise ValueError("Query and Key differ in sizes in dimension 2") query_mask, key_mask, query, key, value = to_contiguous([query_mask, key_mask, query, key, value]) use_cuda = query_mask.is_cuda num_hash = config["num_hash"] hash_code_len = config["hash_code_len"] hashtable_capacity = int(2**hash_code_len) if config["use_fast_hash"]: query_hash_code, key_hash_code = lsh_cumulation.fast_hash( query_mask, query, key_mask, key, num_hash, hash_code_len, use_cuda, 1 ) else: query_hash_code, key_hash_code = hashing(query, key, num_hash, hash_code_len) cumulation_value = lsh_cumulation.lsh_cumulation( query_mask, query_hash_code, key_mask, key_hash_code, value, hashtable_capacity, use_cuda, 1 ) ctx.save_for_backward(query_mask, key_mask, query_hash_code, key_hash_code, query, key, value) ctx.config = config return cumulation_value @staticmethod def backward(ctx, grad): grad = to_contiguous(grad) query_mask, key_mask, query_hash_code, key_hash_code, query, key, value = ctx.saved_tensors config = ctx.config use_cuda = grad.is_cuda hash_code_len = config["hash_code_len"] hashtable_capacity = int(2**hash_code_len) if config["lsh_backward"]: grad_value = lsh_cumulation.lsh_cumulation( key_mask, key_hash_code, query_mask, query_hash_code, grad, hashtable_capacity, use_cuda, 1 ) grad_query = lsh_cumulation.lsh_weighted_cumulation( query_mask, query_hash_code, grad, key_mask, key_hash_code, value, (hash_code_len / 2) * key, hashtable_capacity, use_cuda, 4, ) grad_key = lsh_cumulation.lsh_weighted_cumulation( key_mask, key_hash_code, value, query_mask, query_hash_code, grad, (hash_code_len / 2) * query, hashtable_capacity, use_cuda, 4, ) else: expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len expectation = expectation 
* query_mask[:, :, None] * key_mask[:, None, :] weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key) grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query) grad_value = torch.matmul(expectation.transpose(-1, -2), grad) return None, None, grad_query, grad_key, grad_value, None # Copied from transformers.models.nystromformer.modeling_nystromformer.NystromformerEmbeddings class YosoEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2, persistent=False ) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class YosoSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * 
self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = ( position_embedding_type if position_embedding_type is not None else config.position_embedding_type ) self.use_expectation = config.use_expectation self.hash_code_len = config.hash_code_len self.use_conv = config.conv_window is not None self.use_fast_hash = config.use_fast_hash self.num_hash = config.num_hash self.lsh_backward = config.lsh_backward self.lsh_config = { "hash_code_len": self.hash_code_len, "use_fast_hash": self.use_fast_hash, "num_hash": self.num_hash, "lsh_backward": self.lsh_backward, } if config.conv_window is not None: self.conv = nn.Conv2d( in_channels=config.num_attention_heads, out_channels=config.num_attention_heads, kernel_size=(config.conv_window, 1), padding=(config.conv_window // 2, 0), bias=False, groups=config.num_attention_heads, ) def transpose_for_scores(self, layer): new_layer_shape = layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size) layer = layer.view(*new_layer_shape) return layer.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.use_conv: conv_value_layer = self.conv(value_layer * attention_mask[:, None, :, None]) batch_size, num_heads, seq_len, head_dim = query_layer.size() query_layer = query_layer.reshape(batch_size * num_heads, seq_len, head_dim) key_layer = key_layer.reshape(batch_size * num_heads, seq_len, head_dim) value_layer = value_layer.reshape(batch_size * num_heads, seq_len, head_dim) # revert changes made by get_extended_attention_mask attention_mask = 1.0 + attention_mask / 10000.0 attention_mask = ( attention_mask.squeeze().repeat(1, num_heads, 1).reshape(batch_size * num_heads, seq_len).int() ) # The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). Inputs # smaller than this are padded with zeros. 
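        # As a concrete example: with attention_head_size = 24, the LSH path below pads query/key/value with
        # 8 trailing zeros so their last dimension reaches the warp size of 32, and the extra columns are
        # sliced off the context layer again right after the cumulation.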
gpu_warp_size = 32 if (not self.use_expectation) and head_dim < gpu_warp_size: pad_size = batch_size * num_heads, seq_len, gpu_warp_size - head_dim query_layer = torch.cat( [ query_layer, torch.zeros(pad_size, device=query_layer.device), ], dim=-1, ) key_layer = torch.cat( [ key_layer, torch.zeros(pad_size, device=key_layer.device), ], dim=-1, ) value_layer = torch.cat( [ value_layer, torch.zeros(pad_size, device=value_layer.device), ], dim=-1, ) if self.use_expectation or self.training: query_layer, key_layer = normalize([query_layer, key_layer]) if self.use_expectation: context_layer = YosoCumulation.apply( attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config ) else: context_layer = YosoLSHCumulation.apply( attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config ) if (not self.use_expectation) and head_dim < gpu_warp_size: context_layer = context_layer[:, :, :head_dim] context_layer = normalize(context_layer) context_layer = context_layer.reshape(batch_size, num_heads, seq_len, head_dim) if self.use_conv: context_layer += conv_value_layer context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, context_layer) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class YosoSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class YosoAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = YosoSelfAttention(config, position_embedding_type=position_embedding_type) self.output = YosoSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_outputs = self.self(hidden_states, attention_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class YosoIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if 
isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class YosoOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class YosoLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = YosoAttention(config) self.add_cross_attention = config.add_cross_attention self.intermediate = YosoIntermediate(config) self.output = YosoOutput(config) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_attention_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class YosoEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([YosoLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform class YosoPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, 
str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Yoso class YosoLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = YosoPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Yoso class YosoOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = YosoLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class YosoPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = YosoConfig base_model_prefix = "yoso" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) YOSO_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`YosoConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ YOSO_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare YOSO Model transformer outputting raw hidden-states without any specific head on top.", YOSO_START_DOCSTRING, ) class YosoModel(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = YosoEmbeddings(config) self.encoder = YosoEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithCrossAttentions( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings("""YOSO Model with a `language modeling` head on top.""", YOSO_START_DOCSTRING) class YosoForMaskedLM(YosoPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.yoso = YosoModel(config) self.cls = YosoOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class YosoClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.""", YOSO_START_DOCSTRING, ) class YosoForSequenceClassification(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.yoso = YosoModel(config) self.classifier = YosoClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """YOSO Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""", YOSO_START_DOCSTRING, ) class YosoForMultipleChoice(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.yoso = YosoModel(config) self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_state = outputs[0] # (bs * num_choices, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs * num_choices, dim) pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim) pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """YOSO Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""", YOSO_START_DOCSTRING, ) class YosoForTokenClassification(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.yoso = YosoModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """YOSO Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""", YOSO_START_DOCSTRING, ) class YosoForQuestionAnswering(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.yoso = YosoModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/yoso/modeling_yoso.py/0
{ "file_path": "transformers/src/transformers/models/yoso/modeling_yoso.py", "repo_id": "transformers", "token_count": 23753 }
378
# Copyright 2022 The Impira Team and the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from typing import List, Optional, Tuple, Union import numpy as np from ..utils import ( ExplicitEnum, add_end_docstrings, is_pytesseract_available, is_torch_available, is_vision_available, logging, ) from .base import ChunkPipeline, build_pipeline_init_args from .question_answering import select_starts_ends if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES TESSERACT_LOADED = False if is_pytesseract_available(): TESSERACT_LOADED = True import pytesseract logger = logging.get_logger(__name__) # normalize_bbox() and apply_tesseract() are derived from apply_tesseract in models/layoutlmv3/feature_extraction_layoutlmv3.py. # However, because the pipeline may evolve from what layoutlmv3 currently does, it's copied (vs. imported) to avoid creating an # unnecessary dependency. def normalize_box(box, width, height): return [ int(1000 * (box[0] / width)), int(1000 * (box[1] / height)), int(1000 * (box[2] / width)), int(1000 * (box[3] / height)), ] def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: Optional[str]): """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" # apply OCR data = pytesseract.image_to_data(image, lang=lang, output_type="dict", config=tesseract_config) words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format actual_boxes = [] for x, y, w, h in zip(left, top, width, height): actual_box = [x, y, x + w, y + h] actual_boxes.append(actual_box) image_width, image_height = image.size # finally, normalize the bounding boxes normalized_boxes = [] for box in actual_boxes: normalized_boxes.append(normalize_box(box, image_width, image_height)) if len(words) != len(normalized_boxes): raise ValueError("Not as many words as there are bounding boxes") return words, normalized_boxes class ModelType(ExplicitEnum): LayoutLM = "layoutlm" LayoutLMv2andv3 = "layoutlmv2andv3" VisionEncoderDecoder = "vision_encoder_decoder" @add_end_docstrings(build_pipeline_init_args(has_image_processor=True, has_tokenizer=True)) class DocumentQuestionAnsweringPipeline(ChunkPipeline): # TODO: Update 
task_summary docs to include an example with document QA and then update the first sentence """ Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. The inputs/outputs are similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR'd words/boxes) as input instead of text context. Example: ```python >>> from transformers import pipeline >>> document_qa = pipeline(model="impira/layoutlm-document-qa") >>> document_qa( ... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", ... question="What is the invoice number?", ... ) [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This document question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"document-question-answering"`. The models that this pipeline can use are models that have been fine-tuned on a document question answering task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=document-question-answering). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.tokenizer is not None and not self.tokenizer.__class__.__name__.endswith("Fast"): raise ValueError( "`DocumentQuestionAnsweringPipeline` requires a fast tokenizer, but a slow tokenizer " f"(`{self.tokenizer.__class__.__name__}`) is provided." ) if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig": self.model_type = ModelType.VisionEncoderDecoder if self.model.config.encoder.model_type != "donut-swin": raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut") else: self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES) if self.model.config.__class__.__name__ == "LayoutLMConfig": self.model_type = ModelType.LayoutLM else: self.model_type = ModelType.LayoutLMv2andv3 def _sanitize_parameters( self, padding=None, doc_stride=None, max_question_len=None, lang: Optional[str] = None, tesseract_config: Optional[str] = None, max_answer_len=None, max_seq_len=None, top_k=None, handle_impossible_answer=None, timeout=None, **kwargs, ): preprocess_params, postprocess_params = {}, {} if padding is not None: preprocess_params["padding"] = padding if doc_stride is not None: preprocess_params["doc_stride"] = doc_stride if max_question_len is not None: preprocess_params["max_question_len"] = max_question_len if max_seq_len is not None: preprocess_params["max_seq_len"] = max_seq_len if lang is not None: preprocess_params["lang"] = lang if tesseract_config is not None: preprocess_params["tesseract_config"] = tesseract_config if timeout is not None: preprocess_params["timeout"] = timeout if top_k is not None: if top_k < 1: raise ValueError(f"top_k parameter should be >= 1 (got {top_k})") postprocess_params["top_k"] = top_k if max_answer_len is not None: if max_answer_len < 1: raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}") postprocess_params["max_answer_len"] = max_answer_len if handle_impossible_answer is not None: postprocess_params["handle_impossible_answer"] = handle_impossible_answer return preprocess_params, {}, postprocess_params def __call__( self, image: Union["Image.Image", str], question: Optional[str] = None, word_boxes: Tuple[str, List[float]] = None, **kwargs, ): """ Answer the question(s) given 
as inputs by using the document(s). A document is defined as an image and an optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for LayoutLM-like models which require them as input. For Donut, no OCR is run. You can invoke the pipeline several ways: - `pipeline(image=image, question=question)` - `pipeline(image=image, question=question, word_boxes=word_boxes)` - `pipeline([{"image": image, "question": question}])` - `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])` Args: image (`str` or `PIL.Image`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. If given a single image, it can be broadcasted to multiple questions. question (`str`): A question to ask of the document. word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*): A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the pipeline will use these words and boxes instead of running OCR on the image to derive them for models that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the pipeline without having to re-run it each time. top_k (`int`, *optional*, defaults to 1): The number of answers to return (will be chosen by order of likelihood). Note that we return less than top_k answers if there are not enough options available within the context. doc_stride (`int`, *optional*, defaults to 128): If the words in the document are too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap. max_answer_len (`int`, *optional*, defaults to 15): The maximum length of predicted answers (e.g., only answers with a shorter length are considered). max_seq_len (`int`, *optional*, defaults to 384): The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using `doc_stride` as overlap) if needed. max_question_len (`int`, *optional*, defaults to 64): The maximum length of the question after tokenization. It will be truncated if needed. handle_impossible_answer (`bool`, *optional*, defaults to `False`): Whether or not we accept impossible as an answer. lang (`str`, *optional*): Language to use while running OCR. Defaults to english. tesseract_config (`str`, *optional*): Additional flags to pass to tesseract while running OCR. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: - **score** (`float`) -- The probability associated to the answer. - **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided `word_boxes`). - **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided `word_boxes`). - **answer** (`str`) -- The answer to the question. 
- **words** (`list[int]`) -- The index of each word/box pair that is in the answer """ if isinstance(question, str): inputs = {"question": question, "image": image} if word_boxes is not None: inputs["word_boxes"] = word_boxes else: inputs = image return super().__call__(inputs, **kwargs) def preprocess( self, input, padding="do_not_pad", doc_stride=None, max_seq_len=None, word_boxes: Tuple[str, List[float]] = None, lang=None, tesseract_config="", timeout=None, ): # NOTE: This code mirrors the code in question answering and will be implemented in a follow up PR # to support documents with enough tokens that overflow the model's window if max_seq_len is None: max_seq_len = self.tokenizer.model_max_length if doc_stride is None: doc_stride = min(max_seq_len // 2, 256) image = None image_features = {} if input.get("image", None) is not None: image = load_image(input["image"], timeout=timeout) if self.image_processor is not None: image_features.update(self.image_processor(images=image, return_tensors=self.framework)) elif self.feature_extractor is not None: image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) elif self.model_type == ModelType.VisionEncoderDecoder: raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor") words, boxes = None, None if not self.model_type == ModelType.VisionEncoderDecoder: if "word_boxes" in input: words = [x[0] for x in input["word_boxes"]] boxes = [x[1] for x in input["word_boxes"]] elif "words" in image_features and "boxes" in image_features: words = image_features.pop("words")[0] boxes = image_features.pop("boxes")[0] elif image is not None: if not TESSERACT_LOADED: raise ValueError( "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract," " but pytesseract is not available" ) if TESSERACT_LOADED: words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config) else: raise ValueError( "You must provide an image or word_boxes. 
If you provide an image, the pipeline will automatically" " run OCR to derive words and boxes" ) if self.tokenizer.padding_side != "right": raise ValueError( "Document question answering only supports tokenizers whose padding side is 'right', not" f" {self.tokenizer.padding_side}" ) if self.model_type == ModelType.VisionEncoderDecoder: task_prompt = f'<s_docvqa><s_question>{input["question"]}</s_question><s_answer>' # Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py encoding = { "inputs": image_features["pixel_values"], "decoder_input_ids": self.tokenizer( task_prompt, add_special_tokens=False, return_tensors=self.framework ).input_ids, "return_dict_in_generate": True, } yield { **encoding, "p_mask": None, "word_ids": None, "words": None, "output_attentions": True, "is_last": True, } else: tokenizer_kwargs = {} if self.model_type == ModelType.LayoutLM: tokenizer_kwargs["text"] = input["question"].split() tokenizer_kwargs["text_pair"] = words tokenizer_kwargs["is_split_into_words"] = True else: tokenizer_kwargs["text"] = [input["question"]] tokenizer_kwargs["text_pair"] = [words] tokenizer_kwargs["boxes"] = [boxes] encoding = self.tokenizer( padding=padding, max_length=max_seq_len, stride=doc_stride, return_token_type_ids=True, truncation="only_second", return_overflowing_tokens=True, **tokenizer_kwargs, ) # TODO: check why slower `LayoutLMTokenizer` and `LayoutLMv2Tokenizer` don't have this key in outputs # FIXME: ydshieh and/or Narsil encoding.pop("overflow_to_sample_mapping", None) # We do not use this num_spans = len(encoding["input_ids"]) # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) # This logic mirrors the logic in the question_answering pipeline p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)] for span_idx in range(num_spans): if self.framework == "pt": span_encoding = {k: torch.tensor(v[span_idx : span_idx + 1]) for (k, v) in encoding.items()} if "pixel_values" in image_features: span_encoding["image"] = image_features["pixel_values"] else: raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") input_ids_span_idx = encoding["input_ids"][span_idx] # keep the cls_token unmasked (some models use it to indicate unanswerable questions) if self.tokenizer.cls_token_id is not None: cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] for cls_index in cls_indices: p_mask[span_idx][cls_index] = 0 # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] # for SEP tokens, and the word's bounding box for words in the original document. 
if "boxes" not in tokenizer_kwargs: bbox = [] for input_id, sequence_id, word_id in zip( encoding.input_ids[span_idx], encoding.sequence_ids(span_idx), encoding.word_ids(span_idx), ): if sequence_id == 1: bbox.append(boxes[word_id]) elif input_id == self.tokenizer.sep_token_id: bbox.append([1000] * 4) else: bbox.append([0] * 4) if self.framework == "pt": span_encoding["bbox"] = torch.tensor(bbox).unsqueeze(0) elif self.framework == "tf": raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") yield { **span_encoding, "p_mask": p_mask[span_idx], "word_ids": encoding.word_ids(span_idx), "words": words, "is_last": span_idx == num_spans - 1, } def _forward(self, model_inputs): p_mask = model_inputs.pop("p_mask", None) word_ids = model_inputs.pop("word_ids", None) words = model_inputs.pop("words", None) is_last = model_inputs.pop("is_last", False) if self.model_type == ModelType.VisionEncoderDecoder: model_outputs = self.model.generate(**model_inputs) else: model_outputs = self.model(**model_inputs) model_outputs = dict(model_outputs.items()) model_outputs["p_mask"] = p_mask model_outputs["word_ids"] = word_ids model_outputs["words"] = words model_outputs["attention_mask"] = model_inputs.get("attention_mask", None) model_outputs["is_last"] = is_last return model_outputs def postprocess(self, model_outputs, top_k=1, **kwargs): if self.model_type == ModelType.VisionEncoderDecoder: answers = [self.postprocess_encoder_decoder_single(o) for o in model_outputs] else: answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs) answers = sorted(answers, key=lambda x: x.get("score", 0), reverse=True)[:top_k] return answers def postprocess_encoder_decoder_single(self, model_outputs, **kwargs): sequence = self.tokenizer.batch_decode(model_outputs["sequences"])[0] # TODO: A lot of this logic is specific to Donut and should probably be handled in the tokenizer # (see https://github.com/huggingface/transformers/pull/18414/files#r961747408 for more context). sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "") sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token ret = { "answer": None, } answer = re.search(r"<s_answer>(.*)</s_answer>", sequence) if answer is not None: ret["answer"] = answer.group(1).strip() return ret def postprocess_extractive_qa( self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs ): min_null_score = 1000000 # large and positive answers = [] for output in model_outputs: words = output["words"] starts, ends, scores, min_null_score = select_starts_ends( start=output["start_logits"], end=output["end_logits"], p_mask=output["p_mask"], attention_mask=output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None, min_null_score=min_null_score, top_k=top_k, handle_impossible_answer=handle_impossible_answer, max_answer_len=max_answer_len, ) word_ids = output["word_ids"] for start, end, score in zip(starts, ends, scores): word_start, word_end = word_ids[start], word_ids[end] if word_start is not None and word_end is not None: answers.append( { "score": float(score), "answer": " ".join(words[word_start : word_end + 1]), "start": word_start, "end": word_end, } ) if handle_impossible_answer: answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0}) return answers
transformers/src/transformers/pipelines/document_question_answering.py/0
{ "file_path": "transformers/src/transformers/pipelines/document_question_answering.py", "repo_id": "transformers", "token_count": 10332 }
379
import types import warnings from typing import List, Optional, Tuple, Union import numpy as np from ..models.bert.tokenization_bert import BasicTokenizer from ..utils import ( ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available, ) from .base import ArgumentHandler, ChunkPipeline, Dataset, build_pipeline_init_args if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES class TokenClassificationArgumentHandler(ArgumentHandler): """ Handles arguments for token classification. """ def __call__(self, inputs: Union[str, List[str]], **kwargs): if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0: inputs = list(inputs) batch_size = len(inputs) elif isinstance(inputs, str): inputs = [inputs] batch_size = 1 elif Dataset is not None and isinstance(inputs, Dataset) or isinstance(inputs, types.GeneratorType): return inputs, None else: raise ValueError("At least one input is required.") offset_mapping = kwargs.get("offset_mapping") if offset_mapping: if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple): offset_mapping = [offset_mapping] if len(offset_mapping) != batch_size: raise ValueError("offset_mapping should have the same batch size as the input") return inputs, offset_mapping class AggregationStrategy(ExplicitEnum): """All the valid aggregation strategies for TokenClassificationPipeline""" NONE = "none" SIMPLE = "simple" FIRST = "first" AVERAGE = "average" MAX = "max" @add_end_docstrings( build_pipeline_init_args(has_tokenizer=True), r""" ignore_labels (`List[str]`, defaults to `["O"]`): A list of labels to ignore. grouped_entities (`bool`, *optional*, defaults to `False`): DEPRECATED, use `aggregation_strategy` instead. Whether or not to group the tokens corresponding to the same entity together in the predictions or not. stride (`int`, *optional*): If stride is provided, the pipeline is applied on all the text. The text is split into chunks of size model_max_length. Works only with fast tokenizers and `aggregation_strategy` different from `NONE`. The value of this argument defines the number of overlapping tokens between chunks. In other words, the model will shift forward by `tokenizer.model_max_length - stride` tokens each step. aggregation_strategy (`str`, *optional*, defaults to `"none"`): The strategy to fuse (or not) tokens based on the model prediction. - "none" : Will simply not do any aggregation and simply return raw results from the model - "simple" : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C, I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{"word": ABC, "entity": "TAG"}, {"word": "D", "entity": "TAG2"}, {"word": "E", "entity": "TAG2"}] Notice that two consecutive B tags will end up as different entities. On word based languages, we might end up splitting words undesirably : Imagine Microsoft being tagged as [{"word": "Micro", "entity": "ENTERPRISE"}, {"word": "soft", "entity": "NAME"}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages that support that meaning, which is basically tokens separated by a space). These mitigations will only work on real words, "New york" might still be tagged with two different entities. 
- "first" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot end up with different tags. Words will simply use the tag of the first token of the word when there is ambiguity. - "average" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot end up with different tags. scores will be averaged first across tokens, and then the maximum label is applied. - "max" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot end up with different tags. Word entity will simply be the token with the maximum score.""", ) class TokenClassificationPipeline(ChunkPipeline): """ Named Entity Recognition pipeline using any `ModelForTokenClassification`. See the [named entity recognition examples](../task_summary#named-entity-recognition) for more information. Example: ```python >>> from transformers import pipeline >>> token_classifier = pipeline(model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple") >>> sentence = "Je m'appelle jean-baptiste et je vis à montréal" >>> tokens = token_classifier(sentence) >>> tokens [{'entity_group': 'PER', 'score': 0.9931, 'word': 'jean-baptiste', 'start': 12, 'end': 26}, {'entity_group': 'LOC', 'score': 0.998, 'word': 'montréal', 'start': 38, 'end': 47}] >>> token = tokens[0] >>> # Start and end provide an easy way to highlight words in the original text. >>> sentence[token["start"] : token["end"]] ' jean-baptiste' >>> # Some models use the same idea to do part of speech. >>> syntaxer = pipeline(model="vblagoje/bert-english-uncased-finetuned-pos", aggregation_strategy="simple") >>> syntaxer("My name is Sarah and I live in London") [{'entity_group': 'PRON', 'score': 0.999, 'word': 'my', 'start': 0, 'end': 2}, {'entity_group': 'NOUN', 'score': 0.997, 'word': 'name', 'start': 3, 'end': 7}, {'entity_group': 'AUX', 'score': 0.994, 'word': 'is', 'start': 8, 'end': 10}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'sarah', 'start': 11, 'end': 16}, {'entity_group': 'CCONJ', 'score': 0.999, 'word': 'and', 'start': 17, 'end': 20}, {'entity_group': 'PRON', 'score': 0.999, 'word': 'i', 'start': 21, 'end': 22}, {'entity_group': 'VERB', 'score': 0.998, 'word': 'live', 'start': 23, 'end': 27}, {'entity_group': 'ADP', 'score': 0.999, 'word': 'in', 'start': 28, 'end': 30}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'london', 'start': 31, 'end': 37}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This token recognition pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous). The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=token-classification). 
""" default_input_names = "sequences" def __init__(self, args_parser=TokenClassificationArgumentHandler(), *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type( TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) self._basic_tokenizer = BasicTokenizer(do_lower_case=False) self._args_parser = args_parser def _sanitize_parameters( self, ignore_labels=None, grouped_entities: Optional[bool] = None, ignore_subwords: Optional[bool] = None, aggregation_strategy: Optional[AggregationStrategy] = None, offset_mapping: Optional[List[Tuple[int, int]]] = None, stride: Optional[int] = None, ): preprocess_params = {} if offset_mapping is not None: preprocess_params["offset_mapping"] = offset_mapping postprocess_params = {} if grouped_entities is not None or ignore_subwords is not None: if grouped_entities and ignore_subwords: aggregation_strategy = AggregationStrategy.FIRST elif grouped_entities and not ignore_subwords: aggregation_strategy = AggregationStrategy.SIMPLE else: aggregation_strategy = AggregationStrategy.NONE if grouped_entities is not None: warnings.warn( "`grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to" f' `aggregation_strategy="{aggregation_strategy}"` instead.' ) if ignore_subwords is not None: warnings.warn( "`ignore_subwords` is deprecated and will be removed in version v5.0.0, defaulted to" f' `aggregation_strategy="{aggregation_strategy}"` instead.' ) if aggregation_strategy is not None: if isinstance(aggregation_strategy, str): aggregation_strategy = AggregationStrategy[aggregation_strategy.upper()] if ( aggregation_strategy in {AggregationStrategy.FIRST, AggregationStrategy.MAX, AggregationStrategy.AVERAGE} and not self.tokenizer.is_fast ): raise ValueError( "Slow tokenizers cannot handle subwords. Please set the `aggregation_strategy` option" ' to `"simple"` or use a fast tokenizer.' ) postprocess_params["aggregation_strategy"] = aggregation_strategy if ignore_labels is not None: postprocess_params["ignore_labels"] = ignore_labels if stride is not None: if stride >= self.tokenizer.model_max_length: raise ValueError( "`stride` must be less than `tokenizer.model_max_length` (or even lower if the tokenizer adds special tokens)" ) if aggregation_strategy == AggregationStrategy.NONE: raise ValueError( "`stride` was provided to process all the text but `aggregation_strategy=" f'"{aggregation_strategy}"`, please select another one instead.' ) else: if self.tokenizer.is_fast: tokenizer_params = { "return_overflowing_tokens": True, "padding": True, "stride": stride, } preprocess_params["tokenizer_params"] = tokenizer_params else: raise ValueError( "`stride` was provided to process all the text but you're using a slow tokenizer." " Please use a fast tokenizer." ) return preprocess_params, {}, postprocess_params def __call__(self, inputs: Union[str, List[str]], **kwargs): """ Classify each token of the text(s) given as inputs. Args: inputs (`str` or `List[str]`): One or several texts (or one list of texts) for token classification. Return: A list or a list of list of `dict`: Each result comes as a list of dictionaries (one for each token in the corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with the following keys: - **word** (`str`) -- The token/word classified. This is obtained by decoding the selected tokens. If you want to have the exact string in the original sentence, use `start` and `end`. 
- **score** (`float`) -- The corresponding probability for `entity`. - **entity** (`str`) -- The entity predicted for that token/word (it is named *entity_group* when *aggregation_strategy* is not `"none"`. - **index** (`int`, only present when `aggregation_strategy="none"`) -- The index of the corresponding token in the sentence. - **start** (`int`, *optional*) -- The index of the start of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer - **end** (`int`, *optional*) -- The index of the end of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer """ _inputs, offset_mapping = self._args_parser(inputs, **kwargs) if offset_mapping: kwargs["offset_mapping"] = offset_mapping return super().__call__(inputs, **kwargs) def preprocess(self, sentence, offset_mapping=None, **preprocess_params): tokenizer_params = preprocess_params.pop("tokenizer_params", {}) truncation = True if self.tokenizer.model_max_length and self.tokenizer.model_max_length > 0 else False inputs = self.tokenizer( sentence, return_tensors=self.framework, truncation=truncation, return_special_tokens_mask=True, return_offsets_mapping=self.tokenizer.is_fast, **tokenizer_params, ) inputs.pop("overflow_to_sample_mapping", None) num_chunks = len(inputs["input_ids"]) for i in range(num_chunks): if self.framework == "tf": model_inputs = {k: tf.expand_dims(v[i], 0) for k, v in inputs.items()} else: model_inputs = {k: v[i].unsqueeze(0) for k, v in inputs.items()} if offset_mapping is not None: model_inputs["offset_mapping"] = offset_mapping model_inputs["sentence"] = sentence if i == 0 else None model_inputs["is_last"] = i == num_chunks - 1 yield model_inputs def _forward(self, model_inputs): # Forward special_tokens_mask = model_inputs.pop("special_tokens_mask") offset_mapping = model_inputs.pop("offset_mapping", None) sentence = model_inputs.pop("sentence") is_last = model_inputs.pop("is_last") if self.framework == "tf": logits = self.model(**model_inputs)[0] else: output = self.model(**model_inputs) logits = output["logits"] if isinstance(output, dict) else output[0] return { "logits": logits, "special_tokens_mask": special_tokens_mask, "offset_mapping": offset_mapping, "sentence": sentence, "is_last": is_last, **model_inputs, } def postprocess(self, all_outputs, aggregation_strategy=AggregationStrategy.NONE, ignore_labels=None): if ignore_labels is None: ignore_labels = ["O"] all_entities = [] for model_outputs in all_outputs: logits = model_outputs["logits"][0].numpy() sentence = all_outputs[0]["sentence"] input_ids = model_outputs["input_ids"][0] offset_mapping = ( model_outputs["offset_mapping"][0] if model_outputs["offset_mapping"] is not None else None ) special_tokens_mask = model_outputs["special_tokens_mask"][0].numpy() maxes = np.max(logits, axis=-1, keepdims=True) shifted_exp = np.exp(logits - maxes) scores = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) if self.framework == "tf": input_ids = input_ids.numpy() offset_mapping = offset_mapping.numpy() if offset_mapping is not None else None pre_entities = self.gather_pre_entities( sentence, input_ids, scores, offset_mapping, special_tokens_mask, aggregation_strategy ) grouped_entities = self.aggregate(pre_entities, aggregation_strategy) # Filter anything that is in self.ignore_labels entities = [ entity for entity in grouped_entities if entity.get("entity", None) not in ignore_labels and entity.get("entity_group", None) not in ignore_labels ] 
all_entities.extend(entities) num_chunks = len(all_outputs) if num_chunks > 1: all_entities = self.aggregate_overlapping_entities(all_entities) return all_entities def aggregate_overlapping_entities(self, entities): if len(entities) == 0: return entities entities = sorted(entities, key=lambda x: x["start"]) aggregated_entities = [] previous_entity = entities[0] for entity in entities: if previous_entity["start"] <= entity["start"] < previous_entity["end"]: current_length = entity["end"] - entity["start"] previous_length = previous_entity["end"] - previous_entity["start"] if current_length > previous_length: previous_entity = entity elif current_length == previous_length and entity["score"] > previous_entity["score"]: previous_entity = entity else: aggregated_entities.append(previous_entity) previous_entity = entity aggregated_entities.append(previous_entity) return aggregated_entities def gather_pre_entities( self, sentence: str, input_ids: np.ndarray, scores: np.ndarray, offset_mapping: Optional[List[Tuple[int, int]]], special_tokens_mask: np.ndarray, aggregation_strategy: AggregationStrategy, ) -> List[dict]: """Fuse various numpy arrays into dicts with all the information needed for aggregation""" pre_entities = [] for idx, token_scores in enumerate(scores): # Filter special_tokens if special_tokens_mask[idx]: continue word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])) if offset_mapping is not None: start_ind, end_ind = offset_mapping[idx] if not isinstance(start_ind, int): if self.framework == "pt": start_ind = start_ind.item() end_ind = end_ind.item() word_ref = sentence[start_ind:end_ind] if getattr(self.tokenizer, "_tokenizer", None) and getattr( self.tokenizer._tokenizer.model, "continuing_subword_prefix", None ): # This is a BPE, word aware tokenizer, there is a correct way # to fuse tokens is_subword = len(word) != len(word_ref) else: # This is a fallback heuristic. This will fail most likely on any kind of text + punctuation mixtures that will be considered "words". Non word aware models cannot do better than this unfortunately. 
if aggregation_strategy in { AggregationStrategy.FIRST, AggregationStrategy.AVERAGE, AggregationStrategy.MAX, }: warnings.warn( "Tokenizer does not support real words, using fallback heuristic", UserWarning, ) is_subword = start_ind > 0 and " " not in sentence[start_ind - 1 : start_ind + 1] if int(input_ids[idx]) == self.tokenizer.unk_token_id: word = word_ref is_subword = False else: start_ind = None end_ind = None is_subword = False pre_entity = { "word": word, "scores": token_scores, "start": start_ind, "end": end_ind, "index": idx, "is_subword": is_subword, } pre_entities.append(pre_entity) return pre_entities def aggregate(self, pre_entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]: if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}: entities = [] for pre_entity in pre_entities: entity_idx = pre_entity["scores"].argmax() score = pre_entity["scores"][entity_idx] entity = { "entity": self.model.config.id2label[entity_idx], "score": score, "index": pre_entity["index"], "word": pre_entity["word"], "start": pre_entity["start"], "end": pre_entity["end"], } entities.append(entity) else: entities = self.aggregate_words(pre_entities, aggregation_strategy) if aggregation_strategy == AggregationStrategy.NONE: return entities return self.group_entities(entities) def aggregate_word(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> dict: word = self.tokenizer.convert_tokens_to_string([entity["word"] for entity in entities]) if aggregation_strategy == AggregationStrategy.FIRST: scores = entities[0]["scores"] idx = scores.argmax() score = scores[idx] entity = self.model.config.id2label[idx] elif aggregation_strategy == AggregationStrategy.MAX: max_entity = max(entities, key=lambda entity: entity["scores"].max()) scores = max_entity["scores"] idx = scores.argmax() score = scores[idx] entity = self.model.config.id2label[idx] elif aggregation_strategy == AggregationStrategy.AVERAGE: scores = np.stack([entity["scores"] for entity in entities]) average_scores = np.nanmean(scores, axis=0) entity_idx = average_scores.argmax() entity = self.model.config.id2label[entity_idx] score = average_scores[entity_idx] else: raise ValueError("Invalid aggregation_strategy") new_entity = { "entity": entity, "score": score, "word": word, "start": entities[0]["start"], "end": entities[-1]["end"], } return new_entity def aggregate_words(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]: """ Override tokens from a given word that disagree to force agreement on word boundaries. Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT """ if aggregation_strategy in { AggregationStrategy.NONE, AggregationStrategy.SIMPLE, }: raise ValueError("NONE and SIMPLE strategies are invalid for word aggregation") word_entities = [] word_group = None for entity in entities: if word_group is None: word_group = [entity] elif entity["is_subword"]: word_group.append(entity) else: word_entities.append(self.aggregate_word(word_group, aggregation_strategy)) word_group = [entity] # Last item if word_group is not None: word_entities.append(self.aggregate_word(word_group, aggregation_strategy)) return word_entities def group_sub_entities(self, entities: List[dict]) -> dict: """ Group together the adjacent tokens with the same entity predicted. Args: entities (`dict`): The entities predicted by the pipeline. 
""" # Get the first entity in the entity group entity = entities[0]["entity"].split("-", 1)[-1] scores = np.nanmean([entity["score"] for entity in entities]) tokens = [entity["word"] for entity in entities] entity_group = { "entity_group": entity, "score": np.mean(scores), "word": self.tokenizer.convert_tokens_to_string(tokens), "start": entities[0]["start"], "end": entities[-1]["end"], } return entity_group def get_tag(self, entity_name: str) -> Tuple[str, str]: if entity_name.startswith("B-"): bi = "B" tag = entity_name[2:] elif entity_name.startswith("I-"): bi = "I" tag = entity_name[2:] else: # It's not in B-, I- format # Default to I- for continuation. bi = "I" tag = entity_name return bi, tag def group_entities(self, entities: List[dict]) -> List[dict]: """ Find and group together the adjacent tokens with the same entity predicted. Args: entities (`dict`): The entities predicted by the pipeline. """ entity_groups = [] entity_group_disagg = [] for entity in entities: if not entity_group_disagg: entity_group_disagg.append(entity) continue # If the current entity is similar and adjacent to the previous entity, # append it to the disaggregated entity group # The split is meant to account for the "B" and "I" prefixes # Shouldn't merge if both entities are B-type bi, tag = self.get_tag(entity["entity"]) last_bi, last_tag = self.get_tag(entity_group_disagg[-1]["entity"]) if tag == last_tag and bi != "B": # Modify subword type to be previous_type entity_group_disagg.append(entity) else: # If the current entity is different from the previous entity # aggregate the disaggregated entity group entity_groups.append(self.group_sub_entities(entity_group_disagg)) entity_group_disagg = [entity] if entity_group_disagg: # it's the last entity, add it to the entity groups entity_groups.append(self.group_sub_entities(entity_group_disagg)) return entity_groups NerPipeline = TokenClassificationPipeline
transformers/src/transformers/pipelines/token_classification.py/0
{ "file_path": "transformers/src/transformers/pipelines/token_classification.py", "repo_id": "transformers", "token_count": 11839 }
380
#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .agents import BASE_PYTHON_TOOLS, clean_code_for_chat, clean_code_for_run
from .python_interpreter import InterpretorError, evaluate


### Fake tools for test
def classifier(text, labels):
    return f"This is the classification of {text} along {labels}."


def translator(text, src_lang, tgt_lang):
    return f"This is the translation of {text} from {src_lang} to {tgt_lang}."


def speaker(text):
    return f"This is actually a sound reading {text}."


def transcriber(audio):
    if "sound" not in audio:
        raise ValueError(f"`audio` ({audio}) is not a sound.")
    return f"This is the transcribed text from {audio}."


def image_generator(prompt):
    return f"This is actually an image representing {prompt}."


def image_captioner(image):
    if "image" not in image:
        raise ValueError(f"`image` ({image}) is not an image.")
    return f"This is a description of {image}."


def image_transformer(image, prompt):
    if "image" not in image:
        raise ValueError(f"`image` ({image}) is not an image.")
    return f"This is a transformation of {image} according to {prompt}."


def question_answerer(text, question):
    return f"This is the answer to {question} from {text}."


def image_qa(image, question):
    if "image" not in image:
        raise ValueError(f"`image` ({image}) is not an image.")
    return f"This is the answer to {question} from {image}."


def text_downloader(url):
    return f"This is the content of {url}."


def summarizer(text):
    return f"This is a summary of {text}."


def video_generator(prompt, seconds=2):
    return f"A video of {prompt}"


def document_qa(image, question):
    return f"This is the answer to {question} from the document {image}."


def image_segmenter(image, prompt):
    return f"This is the mask of {prompt} in {image}"


TEST_TOOLS = {
    "text_classifier": classifier,
    "translator": translator,
    "text_reader": speaker,
    "summarizer": summarizer,
    "transcriber": transcriber,
    "image_generator": image_generator,
    "image_captioner": image_captioner,
    "image_transformer": image_transformer,
    "text_qa": question_answerer,
    "text_downloader": text_downloader,
    "image_qa": image_qa,
    "video_generator": video_generator,
    "document_qa": document_qa,
    "image_segmenter": image_segmenter,
}


class Problem:
    """
    A class regrouping all the information to solve a problem on which we will evaluate agents.

    Args:
        task (`str` or `list[str]`):
            One or several descriptions of the task to perform. If a list, it should contain variations on the
            phrasing, but for the same task.
        inputs (`list[str]` or `dict[str, str]`):
            The inputs that will be fed to the tools. For this testing environment, only strings are accepted as
            values. Pass along a dictionary when you want to specify the value of each input, or just the list of
            inputs expected (the value used will be `<<input_name>>` in this case).
        answer (`str` or `list[str]`):
            The theoretical answer (or list of possible valid answers) to the problem, as code.
    """

    def __init__(self, task, inputs, answer):
        self.task = task
        self.inputs = inputs
        self.answer = answer


### The list of problems the agent will be evaluated on.
EVALUATION_TASKS = [
    Problem(
        task=[
            "Is the following `text` (in Spanish) positive or negative?",
            "Is the text in the variable `text` (in Spanish) positive or negative?",
            "Translate the following `text` from Spanish to English then tell me if its positive or negative.",
        ],
        inputs=["text"],
        answer="""text_classifier(translator(text, src_lang="Spanish", tgt_lang="English"), labels=["positive", "negative"])""",
    ),
    Problem(
        task=[
            "Tell me out loud what the `image` contains.",
            "Describe the following `image` out loud.",
            "Find what is in the picture stored in `image` then read it out loud.",
        ],
        inputs=["image"],
        answer=[
            "text_reader(image_captioner(image))",
            "text_reader(image_qa(image, question='What is in the image?'))",
        ],
    ),
    Problem(
        task=[
            "Generate an image from the text given in `text_input`. Then transform it according to the text in `prompt`.",
            "Use the following `text_input` to generate an image, then transform it by using the text in `prompt`.",
        ],
        inputs=["text_input", "prompt"],
        answer="image_transformer(image_generator(text_input), prompt)",
    ),
    Problem(
        task=[
            "Download the content of `url`, summarize it then generate an image from its content.",
            "Use a summary of the web page at `url` to generate an image.",
            "Summarize the content of the web page at `url`, and use the result to generate an image.",
        ],
        inputs=["url"],
        answer="image_generator(summarizer(text_downloader(url)))",
    ),
    Problem(
        task=[
            "Transform the following `image` using the prompt in `text`. The prompt is in Spanish.",
            "Use the text prompt in `text` (in Spanish) to transform the following `image`.",
            "Translate the `text` from Spanish to English then use it to transform the picture in `image`.",
        ],
        inputs=["text", "image"],
        answer="image_transformer(image, translator(text, src_lang='Spanish', tgt_lang='English'))",
    ),
    Problem(
        task=[
            "Download the content of `url`, summarize it then read it out loud to me.",
            "Read me a summary of the web page at `url`.",
        ],
        inputs=["url"],
        answer="text_reader(summarizer(text_downloader(url)))",
    ),
    Problem(
        task=[
            "Generate an image from the text given in `text_input`.",
        ],
        inputs=["text_input"],
        answer="image_generator(text_input)",
    ),
    Problem(
        task=[
            "Replace the beaver in the `image` by the `prompt`.",
            "Transform the `image` so that it contains the `prompt`.",
            "Use `prompt` to transform this `image`.",
        ],
        inputs=["image", "prompt"],
        answer="image_transformer(image, prompt)",
    ),
    Problem(
        task=[
            "Provide me the summary of the `text`, then read it to me before transcribing it and translating it in French.",
            "Summarize `text`, read it out loud then transcribe the audio and translate it in French.",
            "Read me a summary of the `text` out loud. Transcribe this and translate it in French.",
        ],
        inputs=["text"],
        answer="translator(transcriber(text_reader(summarizer(text))), src_lang='English', tgt_lang='French')",
    ),
    Problem(
        task=["Generate a video of the `prompt`", "Animate a `prompt`", "Make me a short video using `prompt`."],
        inputs={"prompt": "A lobster swimming"},
        answer="video_generator('A lobster swimming')",
    ),
    Problem(
        task=[
            "Download the following file `url`, summarize it in a few words and generate a video from it.",
            "Fetch the file at this `url`, summarize it, and create an animation out of it."
], inputs=["url"], answer="video_generator(summarizer(text_downloader(url)))", ), ] EVALUATION_CHATS = [ [ Problem( task=[ "Translate the following `text` from Spanish to English.", "Translate the following `text` from Spanish to English.", ], inputs=["text"], answer="translated_text=translator(text, src_lang='Spanish', tgt_lang='English')", ), Problem( task=[ "Is it positive or negative?", "Tell me if its positive or negative.", ], inputs=[], answer="text_classifier(translated_text, labels=['positive', 'negative'])", ), ], [ Problem( task=[ "What does this `image` contain?", "Describe the following `image`.", "Find what is in the picture stored in `image`", ], inputs=["image"], answer=[ "description=image_captioner(image)", "description=image_qa(image, question='What is in the image?')", ], ), Problem( task=["Now, read the description out loud.", "Great! Can you read it out loud?", "Read it out loud."], inputs=[], answer=["audio=text_reader(description)", "audio=text_reader(description)"], ), ], [ Problem( task=[ "Generate an image from the text given in `text_input`.", "Use the following `text_input` to generate an image", ], inputs=["text_input"], answer="image = image_generator(text_input)", ), Problem( task=[ "Transform it according to the text in `prompt`.", "Transform it by using the text in `prompt`.", ], inputs=["prompt"], answer="image_transformer(image, prompt)", ), ], [ Problem( task=[ "Download the content of `url` and summarize it.", "Summarize the content of the web page at `url`.", ], inputs=["url"], answer="summary = summarizer(text_downloader(url))", ), Problem( task=[ "Generate an image from its content.", "Use the previous result to generate an image.", ], inputs=[], answer="image_generator(summary)", ), ], [ Problem( task=[ "Translate this Spanish `text` in English.", "Translate the `text` from Spanish to English.", ], inputs=["text"], answer="translated_text = translator(text, src_lang='Spanish', tgt_lang='English')", ), Problem( task=[ "Transform the following `image` using the translated `text`.", "Use the previous result to transform the following `image`.", ], inputs=["image"], answer="image_transformer(image, translated_text)", ), ], [ Problem( task=["Download the content of `url`.", "Get me the text on the weg page `url`."], inputs=["url"], answer="text = text_downloader(url)", ), Problem( task=["Summarize this text.", "Summarize this text."], inputs=[], answer="summary = summarizer(text)", ), Problem( task=["Read it out loud to me.", "Read me the previous result."], inputs=[], answer="text_reader(summary)", ), ], [ Problem( task=[ "Generate an image from the text given in `text_input`.", ], inputs=["text_input"], answer="image_generator(text_input)", ), ], [ Problem( task=[ "Replace the beaver in the `image` by the `prompt`.", "Transform the `image` so that it contains the `prompt`.", "Use `prompt` to transform this `image`.", ], inputs=["image", "prompt"], answer="image_transformer(image, prompt)", ), ], [ Problem( task=["Provide me the summary of the `text`.", "Summarize `text`."], inputs=["text"], answer="summary = summarizer(text)", ), Problem( task=["Read this summary to me.", "Read it out loud."], inputs=[], answer="audio = text_reader(summarizer(text))", ), Problem( task=["Transcribing the previous result back in text.", "Transcribe the audio."], inputs=[], answer="text = transcriber(audio)", ), Problem( task=["Translating the last result in French.", "Translate this in French."], inputs=[], answer="translator(text, src_lang='English', 
tgt_lang='French')", ), ], [ Problem( task=["Generate a video of the `prompt`", "Animate a `prompt`", "Make me a short video using `prompt`."], inputs={"prompt": "A lobster swimming"}, answer="video_generator('A lobster swimming')", ), ], [ Problem( task=[ "Download the content of `url` and summarize it.", "Summarize the content of the web page at `url`.", ], inputs=["url"], answer="summary = summarizer(text_downloader(url))", ), Problem( task=["generate a video from it.", "Create an animation from the last result."], inputs=[], answer="video_generator(summary)", ), ], ] def get_theoretical_tools(agent_answer, theoretical_answer, code_answer): if not isinstance(theoretical_answer, list): return {name for name in TEST_TOOLS if name in code_answer} if isinstance(agent_answer, dict): for one_answer, one_code in zip(theoretical_answer, code_answer): if one_answer in agent_answer.values(): return {name for name in TEST_TOOLS if name in one_code} for one_answer, one_code in zip(theoretical_answer, code_answer): if agent_answer == one_answer: return {name for name in TEST_TOOLS if name in one_code} return {name for name in TEST_TOOLS if name in code_answer[0]} def evaluate_code(code, inputs=None, state=None, verbose=False, return_interpretor_error=False): tools = BASE_PYTHON_TOOLS.copy() for name, tool in TEST_TOOLS.items(): if name not in code: continue tools[name] = tool if isinstance(inputs, dict): inputs = inputs.copy() elif inputs is not None: inputs = {inp: f"<<{inp}>>" for inp in inputs} if state is not None: state.update(inputs) else: state = inputs try: return evaluate(code, tools, state) except InterpretorError as e: return str(e) except Exception as e: if verbose: print(e) return None def score_code(agent_answer, theoretical_answer, verbose: bool = False): if verbose: print(agent_answer, theoretical_answer) theoretical_answer = theoretical_answer if isinstance(theoretical_answer, list) else [theoretical_answer] if agent_answer in theoretical_answer: if verbose: print("Perfect!") return 1 elif isinstance(agent_answer, dict) and any(v in theoretical_answer for v in agent_answer.values()): if verbose: print("Almsot perfect, result in state!") return 0.75 else: if verbose: print("Result is not the right one but code executed.") return 0.3 def evaluate_one_result(explanation, code, agent_answer, theoretical_answer, answer, verbose=False): tools_in_explanation = {name for name in TEST_TOOLS if f"`{name}`" in explanation} theoretical_tools = get_theoretical_tools(agent_answer, theoretical_answer, answer) if tools_in_explanation == theoretical_tools: tool_selection_score = 1.0 tool_selection_errors = None else: missing_tools = len(theoretical_tools - tools_in_explanation) unexpected_tools = len(tools_in_explanation - theoretical_tools) tool_selection_score = max(0, 1.0 - 0.25 * missing_tools - 0.25 * unexpected_tools) tool_selection_errors = { "selected_tools": tools_in_explanation, "theoretical_tools": theoretical_tools, } tools_in_code = {name for name in TEST_TOOLS if name in code} if tools_in_code == theoretical_tools: tool_used_score = 1.0 tool_used_errors = None else: missing_tools = len(theoretical_tools - tools_in_code) unexpected_tools = len(tools_in_code - theoretical_tools) tool_used_score = max(0, 1.0 - 0.25 * missing_tools - 0.25 * unexpected_tools) tool_used_errors = { "selected_tools": tools_in_explanation, "theoretical_tools": theoretical_tools, } score = score_code(agent_answer, theoretical_answer, verbose=verbose) if score < 1.0: code_errors = { "code_produced": code, 
"evaluation": agent_answer, "theoretical_answer": theoretical_answer, } else: code_errors = None return (tool_selection_score, tool_used_score, score), (tool_selection_errors, tool_used_errors, code_errors) def evaluate_agent(agent, batch_size=8, verbose=False, return_errors=False): """ Evaluates a new agent on all `EVALUATION_TASKS`. Example: ```py agent = NewOpenAiAgent(model="text-davinci-003", api_key=your_api_key) bads = new_evaluate_agent(agent) for bad in bads: print(bad) ``` """ # Sanity check agent_tools = set(agent.toolbox.keys()) if agent_tools != set(TEST_TOOLS): missing_tools = set(TEST_TOOLS) - agent_tools unexpected_tools = set(agent_tools) - TEST_TOOLS raise ValueError( f"Fix the test tools in the evaluate_agent module. Tools mising: {missing_tools}. Extra tools: {unexpected_tools}." ) eval_tasks = [] eval_idx = [] for idx, pb in enumerate(EVALUATION_TASKS): if isinstance(pb.task, list): eval_tasks.extend(pb.task) eval_idx.extend([idx] * len(pb.task)) else: eval_tasks.append(pb.task) eval_idx.append(idx) tool_selection_score = 0 tool_used_score = 0 code_score = 0 if return_errors: tool_selection_errors = {} tool_used_errors = {} code_errors = {} for start_idx in range(0, len(eval_tasks), batch_size): end_idx = min(start_idx + batch_size, len(eval_tasks)) batch_tasks = eval_tasks[start_idx:end_idx] prompts = [agent.format_prompt(task) for task in batch_tasks] results = agent.generate_many(prompts, stop=["Task:"]) for idx, result in enumerate(results): problem = EVALUATION_TASKS[eval_idx[start_idx + idx]] if verbose: print(f"====Task {start_idx + idx}====\n{batch_tasks[idx]}\n") explanation, code = clean_code_for_run(result) # Evaluate agent answer and code answer agent_answer = evaluate_code(code, problem.inputs, verbose=verbose) if isinstance(problem.answer, list): theoretical_answer = [evaluate_code(answer, problem.inputs) for answer in problem.answer] else: theoretical_answer = evaluate_code(problem.answer, problem.inputs) scores, errors = evaluate_one_result( explanation, code, agent_answer, theoretical_answer, problem.answer, verbose=verbose ) tool_selection_score += scores[0] tool_used_score += scores[1] code_score += scores[2] if return_errors: if errors[0] is not None: tool_selection_errors[batch_tasks[idx]] = errors[0] if errors[1] is not None: tool_used_errors[batch_tasks[idx]] = errors[1] if errors[2] is not None: code_errors[batch_tasks[idx]] = errors[2] scores = { "tool selection score": 100 * (tool_selection_score / len(eval_tasks)), "tool used score": 100 * (tool_used_score / len(eval_tasks)), "code score": 100 * (code_score / len(eval_tasks)), } if return_errors: return scores, tool_selection_errors, tool_used_errors, code_errors else: return scores def evaluate_chat_agent(agent, verbose=False, return_errors=False): """ Evaluates a new agent on all `EVALUATION_CHATS`. Example: ```py agent = NewOpenAiAgent(model="text-davinci-003", api_key=your_api_key) bads = new_evaluate_agent(agent) for bad in bads: print(bad) ``` """ # Sanity check agent_tools = set(agent.toolbox.keys()) if agent_tools != set(TEST_TOOLS): missing_tools = set(TEST_TOOLS) - agent_tools unexpected_tools = agent_tools - set(TEST_TOOLS) raise ValueError( f"Fix the test tools in the evaluate_agent module. Tools mising: {missing_tools}. Extra tools: {unexpected_tools}." 
) tool_selection_score = 0 tool_used_score = 0 code_score = 0 total_steps = 0 if return_errors: tool_selection_errors = {} tool_used_errors = {} code_errors = {} for chat_problem in EVALUATION_CHATS: if isinstance(chat_problem[0].task, str): resolved_problems = [chat_problem] else: resolved_problems = [ [Problem(task=pb.task[i], inputs=pb.inputs, answer=pb.answer) for pb in chat_problem] for i in range(len(chat_problem[0].task)) ] for problem in resolved_problems: agent.prepare_for_new_chat() agent_state = {} theoretical_state = ( [{} for _ in range(len(problem[0].answer))] if isinstance(problem[0].answer, list) else {} ) for step, step_problem in enumerate(problem): if verbose: print(step_problem.task) total_steps += 1 prompt = agent.format_prompt(step_problem.task, chat_mode=True) result = agent.generate_one(prompt, stop=["Human:", "====="]) agent.chat_history = prompt + result + "\n" explanation, code = clean_code_for_chat(result) if verbose: print(f"==Explanation from the agent==\n{explanation}") print(f"\n==Code generated by the agent==\n{code}") # Evaluate agent answer and code answer agent_answer = evaluate_code(code, step_problem.inputs, state=agent_state, verbose=verbose) answer = step_problem.answer if isinstance(answer, list): theoretical_answer = [ evaluate_code(a, step_problem.inputs, state=state) for a, state in zip(answer, theoretical_state) ] else: theoretical_answer = evaluate_code(answer, step_problem.inputs, state=theoretical_state) scores, errors = evaluate_one_result( explanation, code, agent_answer, theoretical_answer, answer, verbose=verbose ) tool_selection_score += scores[0] tool_used_score += scores[1] code_score += scores[2] if return_errors: if errors[0] is not None: tool_selection_errors[step_problem.task] = errors[0] if errors[1] is not None: tool_used_errors[step_problem.task] = errors[1] if errors[2] is not None: code_errors[step_problem.task] = errors[2] scores = { "tool selection score": 100 * (tool_selection_score / total_steps), "tool used score": 100 * (tool_used_score / total_steps), "code score": 100 * (code_score / total_steps), } if return_errors: return scores, tool_selection_errors, tool_used_errors, code_errors else: return scores
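# --- Editor's illustrative sketch (not part of the original module) ---
# Both evaluators return percentage scores keyed by "tool selection score", "tool used score"
# and "code score"; with `return_errors=True` they also return per-task error dictionaries.
# `OpenAiAgent` and the model name are assumptions for this sketch; any agent exposing the
# same toolbox works the same way.
if __name__ == "__main__":
    from transformers.tools import OpenAiAgent  # assumed agent implementation

    agent = OpenAiAgent(model="text-davinci-003", api_key="<your-openai-key>")  # placeholder key
    scores, selection_errors, usage_errors, code_errors = evaluate_agent(agent, batch_size=8, return_errors=True)
    print(scores)
    print(evaluate_chat_agent(agent))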
transformers/src/transformers/tools/evaluate_agent.py/0
{ "file_path": "transformers/src/transformers/tools/evaluate_agent.py", "repo_id": "transformers", "token_count": 10886 }
381
# coding=utf-8 # Copyright 2020-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch-independent utilities for the Trainer class. """ import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_mps_available, is_torch_npu_available, is_torch_tpu_available, is_torch_xpu_available, requires_backends, ) if is_torch_available(): import torch def seed_worker(_): """ Helper function to set worker seed during Dataloader initialization. """ worker_seed = torch.initial_seed() % 2**32 set_seed(worker_seed) def enable_full_determinism(seed: int, warn_only: bool = False): """ Helper function for reproducible behavior during distributed training. See - https://pytorch.org/docs/stable/notes/randomness.html for pytorch - https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for tensorflow """ # set seed first set_seed(seed) if is_torch_available(): # Enable PyTorch deterministic mode. This potentially requires either the environment # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, # depending on the CUDA version, so we set them both here os.environ["CUDA_LAUNCH_BLOCKING"] = "1" os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" torch.use_deterministic_algorithms(True, warn_only=warn_only) # Enable CUDNN deterministic mode torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if is_tf_available(): import tensorflow as tf tf.config.experimental.enable_op_determinism() def set_seed(seed: int): """ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed). Args: seed (`int`): The seed to set. """ random.seed(seed) np.random.seed(seed) if is_torch_available(): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # ^^ safe to call this function even if cuda is not available if is_torch_npu_available(): torch.npu.manual_seed_all(seed) if is_torch_xpu_available(): torch.xpu.manual_seed_all(seed) if is_tf_available(): import tensorflow as tf tf.random.set_seed(seed) def neftune_post_forward_hook(module, input, output): """ Implements the NEFTune forward pass for the model using forward hooks. Note this works only for torch.nn.Embedding layers. This method is slightly adapted from the original source code that can be found here: https://github.com/neelsjain/NEFTune Simply add it to your model as follows: ```python model = ... model.embed_tokens.neftune_noise_alpha = 0.1 model.embed_tokens.register_forward_hook(neftune_post_forward_hook) ``` Args: module (`torch.nn.Module`): The embedding module where the hook is attached. Note that you need to set `module.neftune_noise_alpha` to the desired noise alpha value. input (`torch.Tensor`): The input tensor to the model. 
output (`torch.Tensor`): The output tensor of the model (i.e. the embeddings). """ if module.training: dims = torch.tensor(output.size(1) * output.size(2)) mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) return output class EvalPrediction: """ Evaluation output (always contains labels), to be used to compute metrics. Parameters: predictions (`np.ndarray`): Predictions of the model. label_ids (`np.ndarray`): Targets to be matched. inputs (`np.ndarray`, *optional*): """ def __init__( self, predictions: Union[np.ndarray, Tuple[np.ndarray]], label_ids: Union[np.ndarray, Tuple[np.ndarray]], inputs: Optional[Union[np.ndarray, Tuple[np.ndarray]]] = None, ): self.predictions = predictions self.label_ids = label_ids self.inputs = inputs def __iter__(self): if self.inputs is not None: return iter((self.predictions, self.label_ids, self.inputs)) else: return iter((self.predictions, self.label_ids)) def __getitem__(self, idx): if idx < 0 or idx > 2: raise IndexError("tuple index out of range") if idx == 2 and self.inputs is None: raise IndexError("tuple index out of range") if idx == 0: return self.predictions elif idx == 1: return self.label_ids elif idx == 2: return self.inputs class EvalLoopOutput(NamedTuple): predictions: Union[np.ndarray, Tuple[np.ndarray]] label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]] metrics: Optional[Dict[str, float]] num_samples: Optional[int] class PredictionOutput(NamedTuple): predictions: Union[np.ndarray, Tuple[np.ndarray]] label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]] metrics: Optional[Dict[str, float]] class TrainOutput(NamedTuple): global_step: int training_loss: float metrics: Dict[str, float] PREFIX_CHECKPOINT_DIR = "checkpoint" _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$") def get_last_checkpoint(folder): content = os.listdir(folder) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) class IntervalStrategy(ExplicitEnum): NO = "no" STEPS = "steps" EPOCH = "epoch" class EvaluationStrategy(ExplicitEnum): NO = "no" STEPS = "steps" EPOCH = "epoch" class HubStrategy(ExplicitEnum): END = "end" EVERY_SAVE = "every_save" CHECKPOINT = "checkpoint" ALL_CHECKPOINTS = "all_checkpoints" class BestRun(NamedTuple): """ The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]). Parameters: run_id (`str`): The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending with run-{run_id}). objective (`float`): The objective that was obtained for this run. hyperparameters (`Dict[str, Any]`): The hyperparameters picked to get this run. run_summary (`Optional[Any]`): A summary of tuning experiments. `ray.tune.ExperimentAnalysis` object for Ray backend. """ run_id: str objective: Union[float, List[float]] hyperparameters: Dict[str, Any] run_summary: Optional[Any] = None def default_compute_objective(metrics: Dict[str, float]) -> float: """ The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no metrics are provided to the [`Trainer`], the sum of all metrics otherwise. Args: metrics (`Dict[str, float]`): The metrics returned by the evaluate method. 
Return: `float`: The objective to minimize or maximize """ metrics = copy.deepcopy(metrics) loss = metrics.pop("eval_loss", None) _ = metrics.pop("epoch", None) # Remove speed metrics speed_metrics = [ m for m in metrics.keys() if m.endswith("_runtime") or m.endswith("_per_second") or m.endswith("_compilation_time") ] for sm in speed_metrics: _ = metrics.pop(sm, None) return loss if len(metrics) == 0 else sum(metrics.values()) def default_hp_space_optuna(trial) -> Dict[str, float]: from .integrations import is_optuna_available assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`" return { "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5), "seed": trial.suggest_int("seed", 1, 40), "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]), } def default_hp_space_ray(trial) -> Dict[str, float]: from .integrations import is_ray_tune_available assert is_ray_tune_available(), "This function needs ray installed: `pip install ray[tune]`" from ray import tune return { "learning_rate": tune.loguniform(1e-6, 1e-4), "num_train_epochs": tune.choice(list(range(1, 6))), "seed": tune.uniform(1, 40), "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]), } def default_hp_space_sigopt(trial): return [ {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformamtion": "log"}, {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"}, {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"}, { "categorical_values": ["4", "8", "16", "32", "64"], "name": "per_device_train_batch_size", "type": "categorical", }, ] def default_hp_space_wandb(trial) -> Dict[str, float]: from .integrations import is_wandb_available if not is_wandb_available(): raise ImportError("This function needs wandb installed: `pip install wandb`") return { "method": "random", "metric": {"name": "objective", "goal": "minimize"}, "parameters": { "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, "num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6}, "seed": {"distribution": "int_uniform", "min": 1, "max": 40}, "per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]}, }, } class HPSearchBackend(ExplicitEnum): OPTUNA = "optuna" RAY = "ray" SIGOPT = "sigopt" WANDB = "wandb" def is_main_process(local_rank): """ Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on `local_rank`. """ if is_torch_tpu_available(check_device=True): import torch_xla.core.xla_model as xm return xm.get_ordinal() == 0 return local_rank in [-1, 0] def total_processes_number(local_rank): """ Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs. """ if is_torch_tpu_available(check_device=True): import torch_xla.core.xla_model as xm return xm.xrt_world_size() elif local_rank != -1 and is_torch_available(): import torch return torch.distributed.get_world_size() return 1 def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None): """ Measure and return speed performance metrics. This function requires a time snapshot `start_time` before the operation to be measured starts and this function should be run immediately after the operation to be measured has completed. Args: - split: name to prefix metric (like train, eval, test...) 
- start_time: operation start time - num_samples: number of samples processed - num_steps: number of steps processed - num_tokens: number of tokens processed """ runtime = time.time() - start_time result = {f"{split}_runtime": round(runtime, 4)} if runtime == 0: return result if num_samples is not None: samples_per_second = num_samples / runtime result[f"{split}_samples_per_second"] = round(samples_per_second, 3) if num_steps is not None: steps_per_second = num_steps / runtime result[f"{split}_steps_per_second"] = round(steps_per_second, 3) if num_tokens is not None: tokens_per_second = num_tokens / runtime result[f"{split}_tokens_per_second"] = round(tokens_per_second, 3) return result class SchedulerType(ExplicitEnum): LINEAR = "linear" COSINE = "cosine" COSINE_WITH_RESTARTS = "cosine_with_restarts" POLYNOMIAL = "polynomial" CONSTANT = "constant" CONSTANT_WITH_WARMUP = "constant_with_warmup" INVERSE_SQRT = "inverse_sqrt" REDUCE_ON_PLATEAU = "reduce_lr_on_plateau" class TrainerMemoryTracker: """ A helper class that tracks cpu and gpu memory. This class will silently skip unless `psutil` is available. Install with `pip install psutil`. When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage. Example : ```python self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics) self._memory_tracker.start() # code ... metrics = {"train_runtime": 10.5} self._memory_tracker.stop_and_update_metrics(metrics) ``` At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`. To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`]. """ # map trainer methods to metrics prefix stages = { "__init__": "init", "train": "train", "_inner_training_loop": "train", "evaluate": "eval", "predict": "test", } def __init__(self, skip_memory_metrics=False): self.skip_memory_metrics = skip_memory_metrics if not is_psutil_available(): # soft dependency on psutil self.skip_memory_metrics = True if self.skip_memory_metrics: return import psutil # noqa if is_torch_cuda_available(): import torch self.torch = torch self.gpu = {} elif is_torch_mps_available(): import torch self.torch = torch self.gpu = {} elif is_torch_xpu_available(): import torch self.torch = torch self.gpu = {} elif is_torch_npu_available(): import torch self.torch = torch self.gpu = {} else: self.torch = None self.process = psutil.Process() self.cur_stage = None self.cpu = {} self.init_reported = False def derive_stage(self): """derives the stage/caller name automatically""" caller = inspect.currentframe().f_back.f_back.f_code.co_name if caller in self.stages: return self.stages[caller] else: raise ValueError( f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}" ) def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_mem_used_peak = -1 while True: self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def start(self): """start tracking for the caller's stage""" if self.skip_memory_metrics: return stage = self.derive_stage() # deal with nested calls of eval during train - simply ignore those if self.cur_stage is not None and self.cur_stage != stage: return self.cur_stage = stage gc.collect() if self.torch is not None: 
if torch.cuda.is_available(): self.torch.cuda.reset_peak_memory_stats() self.torch.cuda.empty_cache() elif is_torch_xpu_available(): self.torch.xpu.reset_peak_memory_stats() self.torch.xpu.empty_cache() elif is_torch_npu_available(): self.torch.npu.reset_peak_memory_stats() self.torch.npu.empty_cache() # gpu if self.torch is not None: if torch.cuda.is_available(): self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated() elif is_torch_xpu_available(): self.gpu_mem_used_at_start = self.torch.xpu.memory_allocated() elif is_torch_npu_available(): self.gpu_mem_used_at_start = self.torch.npu.memory_allocated() # cpu self.cpu_mem_used_at_start = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() def stop(self, stage): """stop tracking for the passed stage""" # deal with nested calls of eval during train - simply ignore those if self.cur_stage is not None and self.cur_stage != stage: return # this sends a signal to peak_monitor_func to complete its loop self.peak_monitoring = False # first ensure all objects get collected and their memory is freed gc.collect() if self.torch is not None: if torch.cuda.is_available(): self.torch.cuda.empty_cache() elif is_torch_xpu_available(): self.torch.xpu.empty_cache() elif is_torch_npu_available(): self.torch.npu.empty_cache() # concepts: # - alloc_delta: the difference of allocated memory between the end and the start # - peaked_delta: the difference between the peak memory and the current memory # in order to know how much memory the measured code consumed one needs to sum these two # gpu if self.torch is not None: if torch.cuda.is_available(): self.gpu_mem_used_now = self.torch.cuda.memory_allocated() self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated() elif is_torch_xpu_available(): self.gpu_mem_used_now = self.torch.xpu.memory_allocated() self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated() elif is_torch_npu_available(): self.gpu_mem_used_now = self.torch.npu.memory_allocated() self.gpu_mem_used_peak = self.torch.npu.max_memory_allocated() else: raise ValueError("No available GPU device found!") self.gpu[self.cur_stage] = { "begin": self.gpu_mem_used_at_start, "end": self.gpu_mem_used_now, "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), } # cpu self.cpu_mem_used_now = self.cpu_mem_used() self.cpu[self.cur_stage] = { "begin": self.cpu_mem_used_at_start, "end": self.cpu_mem_used_now, "alloc": (self.cpu_mem_used_now - self.cpu_mem_used_at_start), "peaked": max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now), } # reset - cycle finished self.cur_stage = None def update_metrics(self, stage, metrics): """updates the metrics""" if self.skip_memory_metrics: return # deal with nested calls of eval during train - simply ignore those if self.cur_stage is not None and self.cur_stage != stage: return # since we don't have a way to return init metrics, we push them into the first of train/val/predict stages = [stage] if not self.init_reported: stages.insert(0, "init") self.init_reported = True for stage in stages: for t in ["alloc", "peaked"]: if stage in self.cpu and t in self.cpu[stage]: metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t] if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t] # if we need additional debug info, enable the following 
# for t in ["begin", "end"]: # if stage in self.cpu and t in self.cpu[stage]: # metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t] # if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: # metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t] # since memory can be allocated before init, and it might be difficult to track overall # memory usage, in particular for GPU, let's report memory usage at the point init was called if stages[0] == "init": metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"] if self.torch is not None: metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"] # if we also wanted to report any additional memory allocations in between init and # whatever the next stage was we could also report this: # if self.cpu["init"]["end"] != self.cpu[stage]["begin"]: # metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"] # if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]: # metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"] def stop_and_update_metrics(self, metrics=None): """combine stop and metrics update in one call for simpler code""" if self.skip_memory_metrics: return stage = self.derive_stage() self.stop(stage) # init doesn't have metrics to update so we just save that data for later stages to retrieve if metrics is not None: self.update_metrics(stage, metrics) def has_length(dataset): """ Checks if the dataset implements __len__() and it doesn't raise an error """ try: return len(dataset) is not None except TypeError: # TypeError: len() of unsized object return False def denumpify_detensorize(metrics): """ Recursively calls `.item()` on the element of the dictionary passed """ if isinstance(metrics, (list, tuple)): return type(metrics)(denumpify_detensorize(m) for m in metrics) elif isinstance(metrics, dict): return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()}) elif isinstance(metrics, np.generic): return metrics.item() elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1: return metrics.item() return metrics def number_of_arguments(func): """ Return the number of arguments of the passed function, even if it's a partial function. """ if isinstance(func, functools.partial): total_args = len(inspect.signature(func.func).parameters) return total_args - len(func.args) - len(func.keywords) return len(inspect.signature(func).parameters) def find_executable_batch_size( function: callable = None, starting_batch_size: int = 128, auto_find_batch_size: bool = False ): """ Args: A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to `function`. `function` must take in a `batch_size` parameter as its first argument. 
function (`callable`, *optional*) A function to wrap starting_batch_size (`int`, *optional*) The batch size to try and fit into memory auto_find_batch_size (`bool`, *optional*) If False, will just execute `function` """ if function is None: return functools.partial( find_executable_batch_size, starting_batch_size=starting_batch_size, auto_find_batch_size=auto_find_batch_size, ) if auto_find_batch_size: requires_backends(find_executable_batch_size, "accelerate") from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size return accelerate_find_executable_batch_size(function=function, starting_batch_size=starting_batch_size) return functools.partial(function, batch_size=starting_batch_size) class FSDPOption(ExplicitEnum): FULL_SHARD = "full_shard" SHARD_GRAD_OP = "shard_grad_op" NO_SHARD = "no_shard" HYBRID_SHARD = "hybrid_shard" HYBRID_SHARD_ZERO2 = "hybrid_shard_zero2" OFFLOAD = "offload" AUTO_WRAP = "auto_wrap" class RemoveColumnsCollator: """Wrap the data collator to remove unused columns before they are passed to the collator.""" def __init__( self, data_collator, signature_columns, logger=None, model_name: Optional[str] = None, description: Optional[str] = None, ): self.data_collator = data_collator self.signature_columns = signature_columns self.logger = logger self.description = description self.model_name = model_name self.message_logged = False def _remove_columns(self, feature: dict) -> dict: if not isinstance(feature, dict): return feature if not self.message_logged and self.logger and self.model_name: ignored_columns = list(set(feature.keys()) - set(self.signature_columns)) if len(ignored_columns) > 0: dset_description = "" if self.description is None else f"in the {self.description} set" self.logger.info( f"The following columns {dset_description} don't have a corresponding argument in " f"`{self.model_name}.forward` and have been ignored: {', '.join(ignored_columns)}." f" If {', '.join(ignored_columns)} are not expected by `{self.model_name}.forward`, " " you can safely ignore this message." ) self.message_logged = True return {k: v for k, v in feature.items() if k in self.signature_columns} def __call__(self, features: List[dict]): features = [self._remove_columns(feature) for feature in features] return self.data_collator(features)
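# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal demonstration of helpers defined above: `set_seed` for reproducibility,
# `speed_metrics` for timing a block of work, and `denumpify_detensorize` for making
# metrics JSON-serializable. The sleep stands in for a real training/evaluation loop.
if __name__ == "__main__":
    set_seed(42)

    start = time.time()
    time.sleep(0.1)  # placeholder workload
    metrics = speed_metrics("eval", start, num_samples=320, num_steps=40)
    print(metrics)  # {"eval_runtime": ..., "eval_samples_per_second": ..., "eval_steps_per_second": ...}

    print(denumpify_detensorize({"eval_loss": np.float32(0.25)}))  # -> {"eval_loss": 0.25}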
transformers/src/transformers/trainer_utils.py/0
{ "file_path": "transformers/src/transformers/trainer_utils.py", "repo_id": "transformers", "token_count": 11573 }
382
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class AlbertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BarthezTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BartphoTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BertGenerationTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BigBirdTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CamembertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CodeLlamaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CpmTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class DebertaV2Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class ErnieMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class FNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class GPTSw3Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class LayoutXLMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class LlamaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class M2M100Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MarianTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MBart50Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MBartTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MLukeTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MT5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class NllbTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class PegasusTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, 
["sentencepiece"]) class PLBartTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class ReformerTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class RemBertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class SeamlessM4TTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class SiglipTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class Speech2TextTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class SpeechT5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class T5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XGLMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLMProphetNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLMRobertaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"])
transformers/src/transformers/utils/dummy_sentencepiece_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_sentencepiece_objects.py", "repo_id": "transformers", "token_count": 2376 }
383
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: sentencepiece_model.proto # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name="sentencepiece_model.proto", package="sentencepiece", syntax="proto2", serialized_options=b"H\003", create_key=_descriptor._internal_create_key, serialized_pb=( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\xa1\n\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01' b" \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02" b" \x01(\t\x12\x41\n\nmodel_type\x18\x03" b" \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04" b" \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12" b' \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n' b" \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b" b" \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12" b' \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r' b" \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e" b" \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f" b" \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12" b" \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10" b" \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11" b" \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14" b" \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15" b" \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17" b" \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16" b" \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18" b" \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19" b" \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e" b" \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$" b" \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18" b' \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18"' b" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18)" b" \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+" b" \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18." 
b" \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30" b" \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87" b" \x12+\n\x1ctrain_extremely_large_corpus\x18\x31" b' \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01' b" \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03" b" \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12" b" \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06" b' \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01' b' \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01' b" \x01(\t\x12\x10\n\x08\x65xpected\x18\x02" b' \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01' b" \x03(\x0b\x32'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02" b" \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03" b" \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04" b" \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05" b" \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01" b" \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03" b' \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ), ) _TRAINERSPEC_MODELTYPE = _descriptor.EnumDescriptor( name="ModelType", full_name="sentencepiece.TrainerSpec.ModelType", filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="UNIGRAM", index=0, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="BPE", index=1, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="WORD", index=2, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="CHAR", index=3, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, serialized_start=1294, serialized_end=1347, ) _sym_db.RegisterEnumDescriptor(_TRAINERSPEC_MODELTYPE) _MODELPROTO_SENTENCEPIECE_TYPE = _descriptor.EnumDescriptor( name="Type", full_name="sentencepiece.ModelProto.SentencePiece.Type", filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="NORMAL", index=0, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="UNKNOWN", index=1, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="CONTROL", index=2, number=3, 
serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="USER_DEFINED", index=3, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="BYTE", index=4, number=6, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="UNUSED", index=5, number=5, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, serialized_start=2100, serialized_end=2184, ) _sym_db.RegisterEnumDescriptor(_MODELPROTO_SENTENCEPIECE_TYPE) _TRAINERSPEC = _descriptor.Descriptor( name="TrainerSpec", full_name="sentencepiece.TrainerSpec", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input", full_name="sentencepiece.TrainerSpec.input", index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_format", full_name="sentencepiece.TrainerSpec.input_format", index=1, number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model_prefix", full_name="sentencepiece.TrainerSpec.model_prefix", index=2, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model_type", full_name="sentencepiece.TrainerSpec.model_type", index=3, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="vocab_size", full_name="sentencepiece.TrainerSpec.vocab_size", index=4, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=8000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="accept_language", full_name="sentencepiece.TrainerSpec.accept_language", index=5, number=5, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="self_test_sample_size", full_name="sentencepiece.TrainerSpec.self_test_sample_size", index=6, number=6, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="character_coverage", full_name="sentencepiece.TrainerSpec.character_coverage", index=7, number=10, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.9995), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_sentence_size", full_name="sentencepiece.TrainerSpec.input_sentence_size", index=8, number=11, type=4, cpp_type=4, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shuffle_input_sentence", full_name="sentencepiece.TrainerSpec.shuffle_input_sentence", index=9, number=19, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mining_sentence_size", full_name="sentencepiece.TrainerSpec.mining_sentence_size", index=10, number=12, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b"\030\001", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="training_sentence_size", full_name="sentencepiece.TrainerSpec.training_sentence_size", index=11, number=13, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b"\030\001", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="seed_sentencepiece_size", full_name="sentencepiece.TrainerSpec.seed_sentencepiece_size", index=12, number=14, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1000000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shrinking_factor", full_name="sentencepiece.TrainerSpec.shrinking_factor", index=13, number=15, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.75), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max_sentence_length", full_name="sentencepiece.TrainerSpec.max_sentence_length", index=14, number=18, type=5, cpp_type=1, label=1, has_default_value=True, default_value=4192, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="num_threads", full_name="sentencepiece.TrainerSpec.num_threads", index=15, number=16, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="num_sub_iterations", full_name="sentencepiece.TrainerSpec.num_sub_iterations", index=16, number=17, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max_sentencepiece_length", full_name="sentencepiece.TrainerSpec.max_sentencepiece_length", index=17, number=20, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_by_unicode_script", full_name="sentencepiece.TrainerSpec.split_by_unicode_script", index=18, number=21, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_by_number", full_name="sentencepiece.TrainerSpec.split_by_number", index=19, number=23, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_by_whitespace", full_name="sentencepiece.TrainerSpec.split_by_whitespace", index=20, number=22, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="treat_whitespace_as_suffix", full_name="sentencepiece.TrainerSpec.treat_whitespace_as_suffix", index=21, number=24, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_digits", full_name="sentencepiece.TrainerSpec.split_digits", index=22, number=25, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="control_symbols", full_name="sentencepiece.TrainerSpec.control_symbols", index=23, number=30, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="user_defined_symbols", full_name="sentencepiece.TrainerSpec.user_defined_symbols", index=24, number=31, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="required_chars", full_name="sentencepiece.TrainerSpec.required_chars", index=25, number=36, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="byte_fallback", full_name="sentencepiece.TrainerSpec.byte_fallback", index=26, number=35, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="vocabulary_output_piece_score", full_name="sentencepiece.TrainerSpec.vocabulary_output_piece_score", index=27, number=32, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="hard_vocab_limit", full_name="sentencepiece.TrainerSpec.hard_vocab_limit", index=28, number=33, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="use_all_vocab", full_name="sentencepiece.TrainerSpec.use_all_vocab", index=29, number=34, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="unk_id", full_name="sentencepiece.TrainerSpec.unk_id", index=30, number=40, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bos_id", full_name="sentencepiece.TrainerSpec.bos_id", index=31, number=41, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="eos_id", full_name="sentencepiece.TrainerSpec.eos_id", index=32, number=42, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pad_id", full_name="sentencepiece.TrainerSpec.pad_id", index=33, number=43, type=5, cpp_type=1, label=1, has_default_value=True, default_value=-1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="unk_piece", full_name="sentencepiece.TrainerSpec.unk_piece", index=34, number=45, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"<unk>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bos_piece", full_name="sentencepiece.TrainerSpec.bos_piece", index=35, number=46, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"<s>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="eos_piece", full_name="sentencepiece.TrainerSpec.eos_piece", index=36, number=47, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"</s>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pad_piece", full_name="sentencepiece.TrainerSpec.pad_piece", index=37, number=48, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"<pad>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="unk_surface", full_name="sentencepiece.TrainerSpec.unk_surface", index=38, number=44, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b" \342\201\207 ".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_extremely_large_corpus", full_name="sentencepiece.TrainerSpec.train_extremely_large_corpus", index=39, number=49, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[ _TRAINERSPEC_MODELTYPE, ], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=45, serialized_end=1358, ) _NORMALIZERSPEC = _descriptor.Descriptor( name="NormalizerSpec", full_name="sentencepiece.NormalizerSpec", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", full_name="sentencepiece.NormalizerSpec.name", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="precompiled_charsmap", full_name="sentencepiece.NormalizerSpec.precompiled_charsmap", index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, 
default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="add_dummy_prefix", full_name="sentencepiece.NormalizerSpec.add_dummy_prefix", index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="remove_extra_whitespaces", full_name="sentencepiece.NormalizerSpec.remove_extra_whitespaces", index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="escape_whitespaces", full_name="sentencepiece.NormalizerSpec.escape_whitespaces", index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="normalization_rule_tsv", full_name="sentencepiece.NormalizerSpec.normalization_rule_tsv", index=5, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1361, serialized_end=1570, ) _SELFTESTDATA_SAMPLE = _descriptor.Descriptor( name="Sample", full_name="sentencepiece.SelfTestData.Sample", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input", full_name="sentencepiece.SelfTestData.Sample.input", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="expected", full_name="sentencepiece.SelfTestData.Sample.expected", index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto2", extension_ranges=[], oneofs=[], serialized_start=1641, serialized_end=1682, ) _SELFTESTDATA = _descriptor.Descriptor( name="SelfTestData", full_name="sentencepiece.SelfTestData", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="samples", full_name="sentencepiece.SelfTestData.samples", index=0, 
number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[ _SELFTESTDATA_SAMPLE, ], enum_types=[], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1572, serialized_end=1693, ) _MODELPROTO_SENTENCEPIECE = _descriptor.Descriptor( name="SentencePiece", full_name="sentencepiece.ModelProto.SentencePiece", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="piece", full_name="sentencepiece.ModelProto.SentencePiece.piece", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="score", full_name="sentencepiece.ModelProto.SentencePiece.score", index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="type", full_name="sentencepiece.ModelProto.SentencePiece.type", index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[ _MODELPROTO_SENTENCEPIECE_TYPE, ], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1985, serialized_end=2195, ) _MODELPROTO = _descriptor.Descriptor( name="ModelProto", full_name="sentencepiece.ModelProto", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="pieces", full_name="sentencepiece.ModelProto.pieces", index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="trainer_spec", full_name="sentencepiece.ModelProto.trainer_spec", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="normalizer_spec", full_name="sentencepiece.ModelProto.normalizer_spec", index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="self_test_data", 
full_name="sentencepiece.ModelProto.self_test_data", index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="denormalizer_spec", full_name="sentencepiece.ModelProto.denormalizer_spec", index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[ _MODELPROTO_SENTENCEPIECE, ], enum_types=[], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1696, serialized_end=2206, ) _TRAINERSPEC.fields_by_name["model_type"].enum_type = _TRAINERSPEC_MODELTYPE _TRAINERSPEC_MODELTYPE.containing_type = _TRAINERSPEC _SELFTESTDATA_SAMPLE.containing_type = _SELFTESTDATA _SELFTESTDATA.fields_by_name["samples"].message_type = _SELFTESTDATA_SAMPLE _MODELPROTO_SENTENCEPIECE.fields_by_name["type"].enum_type = _MODELPROTO_SENTENCEPIECE_TYPE _MODELPROTO_SENTENCEPIECE.containing_type = _MODELPROTO _MODELPROTO_SENTENCEPIECE_TYPE.containing_type = _MODELPROTO_SENTENCEPIECE _MODELPROTO.fields_by_name["pieces"].message_type = _MODELPROTO_SENTENCEPIECE _MODELPROTO.fields_by_name["trainer_spec"].message_type = _TRAINERSPEC _MODELPROTO.fields_by_name["normalizer_spec"].message_type = _NORMALIZERSPEC _MODELPROTO.fields_by_name["self_test_data"].message_type = _SELFTESTDATA _MODELPROTO.fields_by_name["denormalizer_spec"].message_type = _NORMALIZERSPEC DESCRIPTOR.message_types_by_name["TrainerSpec"] = _TRAINERSPEC DESCRIPTOR.message_types_by_name["NormalizerSpec"] = _NORMALIZERSPEC DESCRIPTOR.message_types_by_name["SelfTestData"] = _SELFTESTDATA DESCRIPTOR.message_types_by_name["ModelProto"] = _MODELPROTO _sym_db.RegisterFileDescriptor(DESCRIPTOR) TrainerSpec = _reflection.GeneratedProtocolMessageType( "TrainerSpec", (_message.Message,), { "DESCRIPTOR": _TRAINERSPEC, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.TrainerSpec) }, ) _sym_db.RegisterMessage(TrainerSpec) NormalizerSpec = _reflection.GeneratedProtocolMessageType( "NormalizerSpec", (_message.Message,), { "DESCRIPTOR": _NORMALIZERSPEC, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.NormalizerSpec) }, ) _sym_db.RegisterMessage(NormalizerSpec) SelfTestData = _reflection.GeneratedProtocolMessageType( "SelfTestData", (_message.Message,), { "Sample": _reflection.GeneratedProtocolMessageType( "Sample", (_message.Message,), { "DESCRIPTOR": _SELFTESTDATA_SAMPLE, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData.Sample) }, ), "DESCRIPTOR": _SELFTESTDATA, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData) }, ) _sym_db.RegisterMessage(SelfTestData) _sym_db.RegisterMessage(SelfTestData.Sample) ModelProto = _reflection.GeneratedProtocolMessageType( "ModelProto", (_message.Message,), { "SentencePiece": _reflection.GeneratedProtocolMessageType( "SentencePiece", (_message.Message,), { "DESCRIPTOR": _MODELPROTO_SENTENCEPIECE, "__module__": "sentencepiece_model_pb2", # 
@@protoc_insertion_point(class_scope:sentencepiece.ModelProto.SentencePiece) }, ), "DESCRIPTOR": _MODELPROTO, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.ModelProto) }, ) _sym_db.RegisterMessage(ModelProto) _sym_db.RegisterMessage(ModelProto.SentencePiece) DESCRIPTOR._options = None _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # @@protoc_insertion_point(module_scope)
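# ---------------------------------------------------------------------------
# Minimal usage sketch: the `ModelProto` message registered above can
# deserialize a serialized SentencePiece model through the standard protobuf
# API (`ParseFromString`). The "spiece.model" path below is a hypothetical
# example only, and the `__main__` guard keeps the sketch from running when
# this module is imported.
if __name__ == "__main__":
    import sys

    model_file = sys.argv[1] if len(sys.argv) > 1 else "spiece.model"  # hypothetical path
    proto = ModelProto()
    with open(model_file, "rb") as f:
        proto.ParseFromString(f.read())  # standard protobuf deserialization
    # TrainerSpec.ModelType enum: 1=UNIGRAM, 2=BPE, 3=WORD, 4=CHAR (see _TRAINERSPEC_MODELTYPE above)
    print("model_type:", proto.trainer_spec.model_type)
    print("add_dummy_prefix:", proto.normalizer_spec.add_dummy_prefix)
    print("vocab size:", len(proto.pieces))
    # Each SentencePiece entry carries `piece`, `score` and `type` fields
    for sp in proto.pieces[:5]:
        print(sp.piece, sp.score, sp.type)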
# coding=utf-8 # Copyright 2022 {{cookiecutter.authors}} The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch {{cookiecutter.modelname}} model. """ {% if cookiecutter.is_encoder_decoder_model == "False" %} import math import os import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from typing import Optional, Tuple, Union from ...activations import ACT2FN from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, SequenceSummary from ...pytorch_utils import ( apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}" _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config" {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [ "{{cookiecutter.checkpoint_identifier}}", # See all {{cookiecutter.modelname}} models at https://huggingface.co/models?filter={{cookiecutter.lowercase_modelname}} ] def load_tf_weights_in_{{cookiecutter.lowercase_modelname}}(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model # Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Embeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # 
Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}SelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in {{cookiecutter.camelcase_modelname}}Model forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}SelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Attention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = {{cookiecutter.camelcase_modelname}}SelfAttention(config, position_embedding_type=position_embedding_type) self.output = {{cookiecutter.camelcase_modelname}}SelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Intermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput 
with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Output(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Layer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = {{cookiecutter.camelcase_modelname}}Attention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = {{cookiecutter.camelcase_modelname}}Attention(config, position_embedding_type="absolute") self.intermediate = {{cookiecutter.camelcase_modelname}}Intermediate(config) self.output = {{cookiecutter.camelcase_modelname}}Output(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: assert hasattr( self, "crossattention" ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`" # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, 
attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Encoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([{{cookiecutter.camelcase_modelname}}Layer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): if self.gradient_checkpointing and self.training and use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}PredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} class 
{{cookiecutter.camelcase_modelname}}LMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = {{cookiecutter.camelcase_modelname}}PredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}OnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = {{cookiecutter.camelcase_modelname}}LMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class {{cookiecutter.camelcase_modelname}}PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = {{cookiecutter.camelcase_modelname}}Config load_tf_weights = load_tf_weights_in_{{cookiecutter.lowercase_modelname}} base_model_prefix = "{{cookiecutter.lowercase_modelname}}" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) {{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`~{{cookiecutter.camelcase_modelname}}Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare {{cookiecutter.modelname}} Model transformer outputting raw hidden-states without any specific head on top.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelname}}PreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = {{cookiecutter.camelcase_modelname}}Embeddings(config) self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
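Example (an illustrative sketch rather than an official code sample: it assumes you want to exercise the decoder-style cross-attention path, so `is_decoder` and `add_cross_attention` are switched on and a random tensor stands in for a real encoder's output):

```python
>>> from transformers import {{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}Model
>>> import torch

>>> config = {{cookiecutter.camelcase_modelname}}Config.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> config.is_decoder = True
>>> config.add_cross_attention = True
>>> model = {{cookiecutter.camelcase_modelname}}Model.from_pretrained('{{cookiecutter.checkpoint_identifier}}', config=config)

>>> input_ids = torch.tensor([[0, 1, 2, 3]])
>>> # random tensor standing in for the output of a real encoder
>>> encoder_hidden_states = torch.randn(1, 5, config.hidden_size)
>>> outputs = model(input_ids, encoder_hidden_states=encoder_hidden_states)
>>> last_hidden_state = outputs.last_hidden_state
```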
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return 
BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings("""{{cookiecutter.modelname}} Model with a `language modeling` head on top. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING) class {{cookiecutter.camelcase_modelname}}ForMaskedLM({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `{{cookiecutter.camelcase_modelname}}ForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
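Example (a minimal illustrative sketch; it assumes the tokenizer defines a mask token, which is the usual setup for masked language modeling):

```python
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForMaskedLM
>>> import torch

>>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> model = {{cookiecutter.camelcase_modelname}}ForMaskedLM.from_pretrained('{{cookiecutter.checkpoint_identifier}}')

>>> inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="pt")
>>> outputs = model(**inputs, labels=inputs["input_ids"])
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # index of the masked position and the id of the highest-scoring replacement
>>> mask_index = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
>>> predicted_id = logits[0, mask_index].argmax(-1)
```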
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token assert self.config.pad_token_id is not None, "The PAD token should be defined for generation" attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask} @add_start_docstrings( """{{cookiecutter.modelname}} Model with a `language modeling` head on top for CLM fine-tuning. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING ) class {{cookiecutter.camelcase_modelname}}ForCausalLM({{cookiecutter.camelcase_modelname}}PreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `{{cookiecutter.camelcase_modelname}}ForCausalLM` as a standalone, add `is_decoder=True.`") self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. 
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`).
Returns: Example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}Config >>> import torch >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> config = {{cookiecutter.camelcase_modelname}}Config.from_pretrained("{{cookiecutter.checkpoint_identifier}}") >>> config.is_decoder = True >>> model = {{cookiecutter.camelcase_modelname}}ForCausalLM.from_pretrained('{{cookiecutter.checkpoint_identifier}}', config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past is used if past_key_values is not None: input_ids = input_ids[:, -1:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + layer_past[2:],) return reordered_past class {{cookiecutter.camelcase_modelname}}ClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. 
to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """{{cookiecutter.modelname}} Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.classifier = {{cookiecutter.camelcase_modelname}}ClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """{{cookiecutter.modelname}} Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForMultipleChoice({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] pooled_output = self.sequence_summary(sequence_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """{{cookiecutter.modelname}} Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForTokenClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """{{cookiecutter.modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) {% else %} import math import copy from typing import Optional, Tuple, List, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import 
ACT2FN from ...utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, CausalLMOutputWithCrossAttentions ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}" _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config" {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [ "{{cookiecutter.checkpoint_identifier}}", # See all {{cookiecutter.modelname}} models at https://huggingface.co/models?filter={{cookiecutter.lowercase_modelname}} ] def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): super().__init__(num_embeddings, embedding_dim) def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): """`input_ids_shape` is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids_shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ) return super().forward(positions) class {{cookiecutter.camelcase_modelname}}Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})." 
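# Queries are scaled by 1 / sqrt(head_dim) via `self.scaling` below; applying the factor to the query projection is equivalent to scaling the attention scores in standard scaled dot-product attention.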
self.scaling = self.head_dim ** -0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class {{cookiecutter.camelcase_modelname}}EncoderLayer(nn.Module): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): super().__init__() self.embed_dim = config.d_model self.self_attn = {{cookiecutter.camelcase_modelname}}Attention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, ): """ Args: hidden_states
(`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`torch.FloatTensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size *(config.encoder_attention_heads,)*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class {{cookiecutter.camelcase_modelname}}DecoderLayer(nn.Module): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): super().__init__() self.embed_dim = config.d_model self.self_attn = {{cookiecutter.camelcase_modelname}}Attention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = {{cookiecutter.camelcase_modelname}}Attention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`torch.FloatTensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. 
encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape *(batch, seq_len, embed_dim)* encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size *(encoder_attention_heads,)*. cross_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size *(decoder_attention_heads,)*. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs # Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}ClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = 
nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class {{cookiecutter.camelcase_modelname}}PreTrainedModel(PreTrainedModel): config_class = {{cookiecutter.camelcase_modelname}}Config base_model_prefix = "model" supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() {{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`~{{cookiecutter.camelcase_modelname}}Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ {{cookiecutter.uppercase_modelname}}_GENERATION_EXAMPLE = r""" Summarization example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt') >>> # Generate Summary >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5) >>> print(tokenizer.decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)) ``` """ {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Provide for translation and summarization training. 
By default, the model will create this tensor by shifting the `input_ids` to the right, following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_{{cookiecutter.lowercase_modelname}}._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). 
This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ {{cookiecutter.uppercase_modelname}}_STANDALONE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`ProphetNetTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class {{cookiecutter.camelcase_modelname}}Encoder({{cookiecutter.camelcase_modelname}}PreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`{{cookiecutter.camelcase_modelname}}EncoderLayer`]. 
Args: config: {{cookiecutter.camelcase_modelname}}Config embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) self.embed_positions = {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding( config.max_position_embeddings, embed_dim, ) self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}EncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
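Example (an illustrative sketch, not an official sample: it assumes the encoder is fetched from the full seq2seq `{{cookiecutter.camelcase_modelname}}Model` via `get_encoder()`, the usual pattern for encoder-decoder models in this library):

```python
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}Model

>>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> model = {{cookiecutter.camelcase_modelname}}Model.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> encoder = model.get_encoder()

>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> encoder_outputs = encoder(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
>>> last_hidden_state = encoder_outputs.last_hidden_state
```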
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.randn([]) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class {{cookiecutter.camelcase_modelname}}Decoder({{cookiecutter.camelcase_modelname}}PreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`{{cookiecutter.camelcase_modelname}}DecoderLayer`] Args: config: {{cookiecutter.camelcase_modelname}}Config embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, ) self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}DecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions positions = self.embed_positions(input_shape, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # decoder layers if self.gradient_checkpointing and self.training and use_cache: logger.warning("`use_cache 
= True` is incompatible with gradient checkpointing`. Setting `use_cache = False`...") use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: assert attn_mask.size()[0] == ( len(self.layers) ), f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = torch.randn([]) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare {{cookiecutter.modelname}} Model outputting raw hidden-states without any specific head on top.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config, self.shared) self.decoder = {{cookiecutter.camelcase_modelname}}Decoder(config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value 
self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The {{cookiecutter.modelname}} Model with a language modeling head. 
Can be used for summarization.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING ) class {{cookiecutter.camelcase_modelname}}ForConditionalGeneration({{cookiecutter.camelcase_modelname}}PreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"final_logits_bias", r"encoder\.version", r"decoder\.version", r"lm_head\.weight", ] def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): super().__init__(config) self.model = {{cookiecutter.camelcase_modelname}}Model(config) self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) self._resize_final_logits_bias(new_num_tokens) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings({{cookiecutter.uppercase_modelname}}_GENERATION_EXAMPLE) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Conditional generation example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> TXT = "My friends are <mask> but they eat too many carbs." 
>>> model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids'] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),) return reordered_past @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(config, **kwargs) self.model = {{cookiecutter.camelcase_modelname}}Model(config) self.classification_head = {{cookiecutter.camelcase_modelname}}ClassificationHead( config.d_model, config.d_model, config.num_labels, config.classifier_dropout, ) self.model._init_weights(self.classification_head.dense) self.model._init_weights(self.classification_head.out_proj) @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] # last hidden state eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ :, -1, : ] logits = self.classification_head(sentence_representation) loss = None if labels is not None: if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, 
logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ {{cookiecutter.modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = {{cookiecutter.camelcase_modelname}}Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.model._init_weights(self.qa_outputs) @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, start_positions=None, end_positions=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = ( start_logits, end_logits, ) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}DecoderWrapper({{cookiecutter.camelcase_modelname}}PreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. 
""" def __init__(self, config): super().__init__(config) self.decoder = {{cookiecutter.camelcase_modelname}}Decoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}ForCausalLM({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = {{cookiecutter.camelcase_modelname}}DecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: Example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForCausalLM >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('facebook/bart-large') >>> model = {{cookiecutter.camelcase_modelname}}ForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: input_ids = input_ids[:, -1:] # first step, decoder_cached_states are empty return { "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),) return reordered_past {% endif -%}
transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py/0
{ "file_path": "transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py", "repo_id": "transformers", "token_count": 65421 }
385
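The record above is the PyTorch modeling template for a BART-style encoder-decoder stack: an encoder/decoder pair plus conditional-generation, sequence-classification, question-answering and causal-LM heads. As a rough, hedged illustration of how a model generated from it would be driven, the sketch below uses `facebook/bart-base` (the model family this template is copied from) as a stand-in for the `{{cookiecutter.checkpoint_identifier}}` placeholder, which is not a real checkpoint:

```python
# Hedged usage sketch for a model generated from the template above.
# Assumption: the generated classes keep the BART-style API they are copied from,
# so `facebook/bart-base` stands in for the `{{cookiecutter.checkpoint_identifier}}` placeholder.
from transformers import BartForConditionalGeneration, BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
model = BartForConditionalGeneration.from_pretrained("facebook/bart-base")

inputs = tokenizer("Studies have shown that owning a dog is good for you.", return_tensors="pt")
# `generate()` exercises the template's `prepare_inputs_for_generation()` and
# `_reorder_cache()` hooks while beam searching over the decoder.
summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_new_tokens=20)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))
```

During beam search, `generate()` calls the template's `prepare_inputs_for_generation()` to keep only the last `decoder_input_ids` once `past_key_values` is populated, and `_reorder_cache()` to reorder the cached key/value states along the beam dimension.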
{ "modelname": "TemplateBI", "uppercase_modelname": "TEMPLATE_BI", "lowercase_modelname": "template_bi", "camelcase_modelname": "TemplateBi", "authors": "The HuggingFace Team", "checkpoint_identifier": "bi-brand-new-bert-base-cased", "tokenizer_type": "Standalone", "generate_tensorflow_pytorch_and_flax": "PyTorch, TensorFlow and Flax", "is_encoder_decoder_model": "False" }
transformers/templates/adding_a_new_model/tests/standalone.json/0
{ "file_path": "transformers/templates/adding_a_new_model/tests/standalone.json", "repo_id": "transformers", "token_count": 155 }
386
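The fixture above lists the placeholder values that the cookiecutter template in the previous record expects (model name casings, checkpoint identifier, tokenizer type, target frameworks, encoder-decoder flag). Below is a minimal, hedged sketch of rendering the template from such a file with the `cookiecutter` package; the repository's own add-new-model tooling may wrap this differently, and the relative paths are illustrative assumptions:

```python
# Hedged sketch: render the cookiecutter template with the field values from the
# JSON fixture above. The real add-new-model tooling may drive this differently;
# the relative paths below are illustrative assumptions.
import json

from cookiecutter.main import cookiecutter

with open("templates/adding_a_new_model/tests/standalone.json") as f:
    context = json.load(f)

cookiecutter(
    "templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}",
    no_input=True,          # take every value from extra_context instead of prompting
    extra_context=context,  # e.g. camelcase_modelname="TemplateBi", tokenizer_type="Standalone"
)
```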
{ "model_type": "roberta" }
transformers/tests/fixtures/dummy-config.json/0
{ "file_path": "transformers/tests/fixtures/dummy-config.json", "repo_id": "transformers", "token_count": 15 }
387
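The dummy fixture above carries nothing but a `model_type` key, which is the field `AutoConfig` uses to dispatch to a concrete config class. A small sketch of that assumed lookup follows; the temporary directory is illustrative, not how the test fixtures are actually laid out:

```python
# Hedged sketch: AutoConfig dispatches on the "model_type" field, so the dummy
# fixture above resolves to a RobertaConfig populated with default values.
import json
import os
import tempfile

from transformers import AutoConfig

with tempfile.TemporaryDirectory() as tmp_dir:
    with open(os.path.join(tmp_dir, "config.json"), "w") as f:
        json.dump({"model_type": "roberta"}, f)

    config = AutoConfig.from_pretrained(tmp_dir)
    print(type(config).__name__)  # RobertaConfig
```

Keeping the fixture this small works because every other field of `RobertaConfig` has a default value, so only the dispatch key needs to be pinned down.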
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import List, Union from parameterized import parameterized from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from torch import nn from transformers.generation import ( EncoderNoRepeatNGramLogitsProcessor, EncoderRepetitionPenaltyLogitsProcessor, EpsilonLogitsWarper, EtaLogitsWarper, ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitNormalization, LogitsProcessorList, MinLengthLogitsProcessor, MinNewTokensLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, SequenceBiasLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, UnbatchedClassifierFreeGuidanceLogitsProcessor, ) from transformers.generation.logits_process import BarkEosPrioritizerLogitsProcessor @require_torch class LogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length return scores def test_min_length_dist_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) # check that min length is applied at length 5 input_ids = ids_tensor((batch_size, 5), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 input_ids = ids_tensor((batch_size, 15), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) @parameterized.expand([(0,), ([0, 18],)]) def test_new_min_length_dist_processor(self, eos_token_id: Union[int, List[int]]): vocab_size = 20 batch_size = 4 # check that first input is skipped (min new length applying) input_ids = ids_tensor((batch_size, 5), vocab_size=20) new_min_dist_processor = MinNewTokensLengthLogitsProcessor( prompt_length_to_skip=input_ids.shape[-1], min_new_tokens=3, eos_token_id=eos_token_id ) expected_eos_scores_before_min_length = batch_size * [-float("inf")] if isinstance(eos_token_id, list): expected_eos_scores_before_min_length *= len(eos_token_id) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check 
that, for skipping, now prompt length is 5, after that we expect first 5 tokens will be skipped self.assertTrue(new_min_dist_processor.prompt_length_to_skip == 5) # check that min length is applied at length 2 input_ids = ids_tensor((batch_size, 2), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check that min new length is applied at length 6 (because it has only 1 new token) input_ids = ids_tensor((batch_size, 6), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check that min new length is applied at length 7 (because it has only 2 new tokens) input_ids = ids_tensor((batch_size, 7), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check that min new length is not applied anymore at length 8 input_ids = ids_tensor((batch_size, 8), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) # check that min new length is not applied anymore at length 15 input_ids = ids_tensor((batch_size, 15), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) def test_temperature_dist_warper(self): input_ids = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) # tweak scores to not be uniform anymore scores[1, 5] = (1 / length) + 0.1 # peak, 1st batch scores[1, 10] = (1 / length) - 0.4 # valley, 1st batch # compute softmax probs = nn.functional.softmax(scores, dim=-1) temp_dist_warper_sharper = TemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = TemperatureLogitsWarper(temperature=1.3) warped_prob_sharp = nn.functional.softmax(temp_dist_warper_sharper(input_ids, scores.clone()), dim=-1) warped_prob_smooth = nn.functional.softmax(temp_dist_warper_smoother(input_ids, scores.clone()), dim=-1) # uniform distribution stays uniform self.assertTrue(torch.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3)) self.assertTrue(torch.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3)) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max()) self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min()) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max()) self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min()) def test_repetition_penalty_dist_process(self): input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) vocab_size = 10 scores = self._get_uniform_logits(batch_size=2, length=vocab_size) # give values special values scores[0, 0] = -(1 / vocab_size) scores[1, 5] = 4 / vocab_size rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0) scores = 
rep_penalty_proc(input_ids, scores.clone()) # check that values were correctly changed self.assertAlmostEqual(scores[0, 0].item(), -(1 / vocab_size) * 2) self.assertAlmostEqual(scores[0, 1].item(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 0].item(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 5].item(), (4 / vocab_size) / 2) def test_encoder_repetition_penalty_dist_process(self): input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) vocab_size = 10 scores = self._get_uniform_logits(batch_size=2, length=vocab_size) # give values special values scores[0, 0] = -(1 / vocab_size) scores[1, 5] = 4 / vocab_size rep_penalty_proc = EncoderRepetitionPenaltyLogitsProcessor(penalty=2.0, encoder_input_ids=input_ids) scores = rep_penalty_proc(input_ids, scores.clone()) # check that values were correctly changed self.assertAlmostEqual(scores[0, 0].item(), -(1 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 1].item(), (1 / vocab_size) * 2) self.assertAlmostEqual(scores[1, 0].item(), (1 / vocab_size) * 2) self.assertAlmostEqual(scores[1, 5].item(), (4 / vocab_size) * 2) # check that values not in the encoder ids were NOT changed self.assertAlmostEqual(scores[0, 2].item(), (1 / vocab_size)) self.assertAlmostEqual(scores[1, 2].item(), (1 / vocab_size)) def test_top_k_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create ramp distribution ramp_logits = ( torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1) ) ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = TopKLogitsWarper(3) scores = top_k_warp(input_ids, ramp_logits) # check that correct tokens are filtered self.assertListEqual(torch.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(torch.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True]) # check special cases length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) top_k_warp_safety_check = TopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) scores = top_k_warp_safety_check(input_ids, logits) # uniform dist is not changed self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0]) ramp_logits = torch.arange(length, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1) scores = top_k_warp_safety_check(input_ids, ramp_logits) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_top_p_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float) ) top_p_warp = TopPLogitsWarper(0.8) filtered_dist = torch.exp(top_p_warp(input_ids, dist)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 
2 tokens are kept top_p_warp = TopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = top_p_warp(input_ids, ramp_logits) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2]) def test_typical_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor([[0.97, 0.01, 0.01, 0.01], [0.4, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float) ) typical_warp = TypicalLogitsWarper(0.5) filtered_dist = torch.exp(typical_warp(input_ids, dist)) # dist should be filtered to keep min num values so that sum is >= 0.7 # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.97, 0.0, 0.0, 0.0], [0.0, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check special cases length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) typical_warp_safety_check = TypicalLogitsWarper(mass=0.5, filter_value=0.0, min_tokens_to_keep=3) scores = typical_warp_safety_check(input_ids, logits) # uniform dist is not changed self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0]) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept typical_warp = TypicalLogitsWarper(0.7, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = typical_warp(input_ids, ramp_logits) # first batch should keep two tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_epsilon_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor( [[0.87, 0.099, 0.001, 0.03], [0.4, 0.299, 0.101, 0.2]], device=torch_device, dtype=torch.float ) ) epsilon_warp = EpsilonLogitsWarper(0.1) filtered_dist = torch.exp(epsilon_warp(input_ids, dist)) # dist should be filtered to only keep values with proba >= 0.1 # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.87, 0, 0, 0], [0.4, 0.299, 0.101, 0.2]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept epsilon_warp = EpsilonLogitsWarper(5e-2, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = epsilon_warp(input_ids, ramp_logits) # first batch should keep 3 tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. 
self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2]) def test_eta_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor([[0.0, 0.1, 0.8, 0.1], [0.01, 0.04, 0.9, 0.05]], device=torch_device, dtype=torch.float) ) eta_warp = EtaLogitsWarper(0.0625) filtered_dist = torch.exp(eta_warp(input_ids, dist)) # dist should be filtered to only keep values with proba >= min(0.0625, sqrt(0.0625) * e^-H(p)) # min(0.0625, 0.1320) is the cutoff for the first row and min(0.0625, 0.1644) is for the second # where H is the entropy function and p is the probability vector. # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.0, 0.1, 0.8, 0.1], [0.0, 0.0, 0.9, 0.0]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept eta_warp = EtaLogitsWarper(0.1, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = eta_warp(input_ids, ramp_logits) # first batch should keep 2 tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_no_repeat_ngram_dist_processor(self): vocab_size = 3 batch_size = 2 input_ids = torch.tensor([[1, 1, 2, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size, vocab_size) no_repeat_proc_2_gram = NoRepeatNGramLogitsProcessor(2) no_repeat_proc_3_gram = NoRepeatNGramLogitsProcessor(3) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) # 2-gram would forbid 2nd and 3rd token (1,2) at 1st batch and 1st token (0) at 2nd batch self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [True, False, False]]) # 3-gram would forbid no token at 1st batch and 1st token (0) at 2nd batch self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, False, False], [True, False, False]] ) def test_encoder_no_repeat_ngram_dist_processor(self): vocab_size = 3 num_beams = 2 batch_size = 1 encoder_input_ids = torch.tensor([1, 2, 1, 1], device=torch_device, dtype=torch.long) input_ids = torch.tensor([[1, 2, 1], [8, 0, 2]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size * num_beams, vocab_size) no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids) no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) # 2-gram would forbid 1st and 2nd token at 1st beam and 1st token (0) at 2nd beam self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [False, True, False]]) # 3-gram would forbid 1st token at 1st beam and no token at 2nd beam self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, True, False], [False, False, False]] ) # Batched input vocab_size = 3 
num_beams = 2 batch_size = 2 encoder_input_ids = torch.tensor([[1, 2, 1, 1], [0, 0, 2, 1]], device=torch_device, dtype=torch.long) input_ids = torch.tensor([[1, 2, 1], [1, 0, 2], [0, 0, 0], [0, 2, 2]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size * num_beams, vocab_size) no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids) no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) # 2gram # Batch 1 # - Beam 1: tokens (1, 2) forbidden # - Beam 2: tokens (1) forbidden # Batch 2 # - Beam 1: tokens (0, 2) forbidden # - Beam 2: tokens (1) forbidden self.assertListEqual( torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [False, True, False], [True, False, True], [False, True, False]], ) # Batch 1 # - Beam 1: tokens (1) forbidden # - Beam 2: tokens () forbidden # Batch 2 # - Beam 1: tokens (2) forbidden # - Beam 2: tokens () forbidden self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, True, False], [False, False, False], [False, False, True], [False, False, False]], ) def test_no_bad_words_dist_processor(self): vocab_size = 5 batch_size = 2 eos_token_id = 4 input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]] scores = self._get_uniform_logits(batch_size, vocab_size) no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=bad_word_tokens, eos_token_id=eos_token_id) filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone()) # batch 1: 1st, 2nd, and 4th (0, 1, 3) token are forbidden # batch 2: 1st, 2nd, and 3rd (0, 1, 2) token are forbidden # Note that 5th element cannot be forbidden as it is EOS token self.assertListEqual( torch.isinf(filtered_scores).tolist(), [[True, True, False, True, False], [True, True, True, False, False]] ) # check edge case no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[4]], eos_token_id=eos_token_id) filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone()) self.assertTrue(torch.allclose(scores, filtered_scores, atol=1e-3)) def test_bias_dist_processor(self): vocab_size = 5 batch_size = 2 input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) positive_bias = {(1,): 100.0, (4,): 100.0} negative_bias = {(1, 0): -100.0, (0, 1, 2): -100.0, (1, 3, 1, 3): -100.0} # biases the same termination twice, to ensure we can handle overlapping terminations (it won't have an effect # on the test cases, though) negative_bias.update({(1, 3, 1, 3, 1, 3): -100.0}) sequence_bias = {**positive_bias, **negative_bias} # scores = 0 to facilitate checks scores = torch.zeros((batch_size, vocab_size), dtype=torch.float, device=torch_device) bias_dist_proc = SequenceBiasLogitsProcessor(sequence_bias=sequence_bias) filtered_scores = bias_dist_proc(input_ids, scores.clone()) # batch 1: positive bias: tokens (1, 4); negative bias: tokens (0, 3); neutral: tokens (2) # batch 2: positive bias: tokens (1, 4); negative bias: tokens (0, 2); neutral: tokens (3) self.assertListEqual( filtered_scores.tolist(), [[-100.0, 100.0, 0.0, -100.0, 100.0], [-100.0, 100.0, -100.0, 0.0, 100.0]] ) def test_processor_list(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 0 # dummy input_ids and scores 
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.clone()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.clone()

        # instantiate all dist processors
        min_dist_proc = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        temp_dist_warp = TemperatureLogitsWarper(temperature=0.5)
        rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0)
        top_k_warp = TopKLogitsWarper(3)
        top_p_warp = TopPLogitsWarper(0.8)
        no_repeat_proc = NoRepeatNGramLogitsProcessor(2)
        no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[1]], eos_token_id=eos_token_id)

        # no processor list
        scores = min_dist_proc(input_ids, scores)
        scores = temp_dist_warp(input_ids, scores)
        scores = rep_penalty_proc(input_ids, scores)
        scores = top_k_warp(input_ids, scores)
        scores = top_p_warp(input_ids, scores)
        scores = no_repeat_proc(input_ids, scores)
        scores = no_bad_words_dist_proc(input_ids, scores)

        # with processor list
        processor = LogitsProcessorList(
            [
                min_dist_proc,
                temp_dist_warp,
                rep_penalty_proc,
                top_k_warp,
                top_p_warp,
                no_repeat_proc,
                no_bad_words_dist_proc,
            ]
        )
        scores_comp = processor(input_ids, scores_comp)

        # scores should be equal
        self.assertTrue(torch.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_prefix_constrained_logits_processor(self):
        vocab_size = 5
        batch_size = 2

        input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long)
        scores = self._get_uniform_logits(batch_size, vocab_size)

        def prefix_allowed_tokens_fn(batch_id, inputs_ids):
            return [[0, 1], [2, 3]][batch_id]

        prefix_constrained_logits_proc = PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, 1)

        filtered_scores = prefix_constrained_logits_proc(input_ids, scores.clone())

        # batch 1: 1st, 2nd (0, 1) token are allowed
        # batch 2: 3rd, 4th (2, 3) token are allowed
        self.assertListEqual(
            torch.isinf(filtered_scores).tolist(), [[False, False, True, True, True], [True, True, False, False, True]]
        )

        def empty_prefix_allowed_tokens_fn(batch_id, inputs_ids):
            return []

        prefix_constrained_logits_proc = PrefixConstrainedLogitsProcessor(empty_prefix_allowed_tokens_fn, 1)

        self.assertRaises(ValueError, prefix_constrained_logits_proc, input_ids, scores.clone())

    def test_hamming_diversity(self):
        vocab_size = 4
        num_beams = 2
        num_beam_groups = 2

        scores = self._get_uniform_logits(num_beams, vocab_size)
        # batch_idx = 0 -> index batch_idx * num_beam_groups -> idx = 0 * 2 = 0 -> penalises tokens 1
        # batch_idx = 1 -> index batch_idx * num_beam_groups -> idx = 1 * 2 = 2 -> penalises tokens 1
        current_tokens = torch.tensor([0, 3, 1, 2], device=torch_device, dtype=torch.long)

        diversity_logits_processor = HammingDiversityLogitsProcessor(
            diversity_penalty=1.0, num_beams=num_beams, num_beam_groups=num_beam_groups
        )

        processed_scores = diversity_logits_processor(None, scores, current_tokens, 1)

        self.assertTrue(
            torch.allclose(
                processed_scores[0], torch.tensor([-0.7500, 0.2500, 0.2500, 0.2500], device=torch_device), atol=1e-3
            )
        )
        self.assertTrue(
            torch.allclose(
                processed_scores[1], torch.tensor([0.2500, -0.7500, 0.2500, 0.2500], device=torch_device), atol=1e-3
            )
        )

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = ForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertTrue(torch.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertFalse(torch.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = ForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length-1 is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertTrue(torch.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length-1 is not reached
        input_ids = ids_tensor((batch_size, 3), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertFalse(torch.isinf(scores).any())

    def test_remove_nan_inf_logits_processor(self):
        scores = torch.tensor(
            [[0.0, 0.7, 0.8, float("nan")], [0.1, float("inf"), 0.3, float("-inf")]], device=torch_device
        )
        input_ids = ids_tensor((2, 4), vocab_size=20)

        logits_processor = InfNanRemoveLogitsProcessor()

        scores = logits_processor(input_ids, scores)

        self.assertTrue(
            torch.allclose(
                scores,
                torch.tensor(
                    [[0.0, 0.7, 0.8, 0.0], [0.1, torch.finfo(scores.dtype).max, 0.3, torch.finfo(scores.dtype).min]],
                    device=torch_device,
                ),
                atol=1e-6,
            )
        )

    def test_exponential_decay_length_penalty(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        penalty_start = 5
        penalty_factor = 1.1

        input_ids = ids_tensor((batch_size, 2), vocab_size=vocab_size)
        input_ids_seq_length = input_ids.shape[-1]

        length_decay_processor = ExponentialDecayLengthPenalty(
            exponential_decay_length_penalty=(penalty_start, penalty_factor),
            eos_token_id=eos_token_id,
            input_ids_seq_length=input_ids_seq_length,
        )

        # check that penalty is not applied before start
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_start = torch.clone(scores)  # clone scores as processor updates them in place
        scores_before_start = length_decay_processor(input_ids, scores_before_start)
        self.assertListEqual(scores_before_start[:, eos_token_id].tolist(), scores[:, eos_token_id].tolist())

        # check that penalty is applied after start
        input_ids = ids_tensor((batch_size, 20), vocab_size=vocab_size)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_after_start = torch.clone(scores)  # clone scores as processor updates them in place
        scores_after_start = length_decay_processor(input_ids, scores_after_start)
        self.assertTrue(torch.gt(scores_after_start[:, eos_token_id], scores[:, eos_token_id]).all())

        # check the penalty increases negative scores
        input_ids = ids_tensor((batch_size, 20), vocab_size=vocab_size)
        scores = torch.neg(self._get_uniform_logits(batch_size, vocab_size))
        scores_after_start = torch.clone(scores)  # clone scores as processor updates them in place
        scores_after_start = length_decay_processor(input_ids, scores_after_start)
        self.assertTrue(torch.gt(scores_after_start[:, eos_token_id], scores[:, eos_token_id]).all())

    def test_normalization(self):
        input_ids = None

        scores = torch.tensor(
            [[-23.18, -29.96, -43.54, 47.77], [-33.58, -26.87, -32.96, 22.51]], device=torch_device, dtype=torch.float
        )

        logit_normalization = LogitNormalization()
        normalized_scores = logit_normalization(input_ids, scores).exp()

        ones = torch.ones(scores.shape[0], device=torch_device, dtype=torch.float)
        self.assertTrue(normalized_scores.sum(dim=-1).allclose(ones))

        self.assertTrue(normalized_scores.allclose(scores.softmax(dim=-1)))

    def test_classifier_free_guidance(self):
        class Namespace(dict):
            pass

        logits_uncond = torch.tensor([[[1.0, 0, 1.5]]])
        logits_cond = torch.tensor([[[1.0, 1.0, 1.0]]])

        def dummy_model(input_ids, attention_mask, use_cache=True, past_key_values=None):
            out = Namespace()
            out.logits = logits_uncond
            out.past_key_values = None
            return out

        def lsm(x):
            return torch.nn.functional.log_softmax(x, dim=-1)

        # explicit unconditional prompt + attention mask
        input_ids = torch.LongTensor([[0]])
        cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(
            1.5, dummy_model, input_ids, torch.ones_like(input_ids, dtype=torch.long)
        )
        out = cfg(input_ids, logits_cond)[0, -1]

        res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1]

        self.assertAlmostEqual(out[0].item(), res[0].item())
        self.assertAlmostEqual(out[1].item(), res[1].item())
        self.assertAlmostEqual(out[2].item(), res[2].item())

        # explicit unconditional prompt
        input_ids = torch.LongTensor([[0]])
        cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(1.5, dummy_model, input_ids)
        out = cfg(input_ids, logits_cond)[0, -1]

        res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1]

        self.assertAlmostEqual(out[0].item(), res[0].item())
        self.assertAlmostEqual(out[1].item(), res[1].item())
        self.assertAlmostEqual(out[2].item(), res[2].item())

        # all implicit
        input_ids = torch.LongTensor([[0]])
        cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(1.5, dummy_model)
        out = cfg(input_ids, logits_cond)[0, -1]

        res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1]

        self.assertAlmostEqual(out[0].item(), res[0].item())
        self.assertAlmostEqual(out[1].item(), res[1].item())
        self.assertAlmostEqual(out[2].item(), res[2].item())

    def test_early_stop_processor(self):
        input_ids = None
        eos_token_id = 2
        min_eos_p = 0.1  ## some small float

        scores = self._get_uniform_logits(2, 4)
        scores[0][eos_token_id] = -6  ## less than log(min_eos_p)

        esp = BarkEosPrioritizerLogitsProcessor(eos_token_id=eos_token_id, min_eos_p=min_eos_p)
        actual_scores = esp(input_ids, scores)
        expected_scores_list = [
            scores[0].tolist(),
            [float("-inf"), float("-inf"), scores[0][0], float("-inf")],
        ]
        self.assertListEqual(actual_scores.tolist(), expected_scores_list)

    def test_early_stop_processor_multi_eos(self):
        input_ids = None
        eos_token_id = [2, 3]
        min_eos_p = 0.1  ## some small float

        scores = self._get_uniform_logits(2, 4)
        scores[0][eos_token_id] = -6  ## less than log(min_eos_p)

        esp = BarkEosPrioritizerLogitsProcessor(eos_token_id=eos_token_id, min_eos_p=min_eos_p)
        actual_scores = esp(input_ids, scores)
        expected_scores_list = [
            scores[0].tolist(),
            [float("-inf"), float("-inf"), scores[0][0], scores[0][0]],
        ]
        self.assertListEqual(actual_scores.tolist(), expected_scores_list)
transformers/tests/generation/test_logits_process.py/0
{ "file_path": "transformers/tests/generation/test_logits_process.py", "repo_id": "transformers", "token_count": 16668 }
388
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch AltCLIP model. """ import inspect import os import tempfile import unittest import numpy as np import requests from transformers import AltCLIPConfig, AltCLIPProcessor, AltCLIPTextConfig, AltCLIPVisionConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn as nn from transformers import AltCLIPModel, AltCLIPTextModel, AltCLIPVisionModel from transformers.models.altclip.modeling_altclip import ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class AltCLIPVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return AltCLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = AltCLIPVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // 
patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class AltCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (AltCLIPVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = AltCLIPVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=AltCLIPVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="AltCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="AltCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="AltCLIPVisionModel use the same cv backbone with CLIP model.") def test_model_from_pretrained(self): pass class AltCLIPTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, project_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = 
seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.project_dim = project_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return AltCLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, project_dim=self.project_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, pad_token_id=1, ) def create_and_check_model(self, config, input_ids, input_mask): model = AltCLIPTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class AltCLIPTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (AltCLIPTextModel,) if is_torch_available() else () fx_compatible = True test_pruning = False test_head_masking = False # TODO (@SunMarc): Fix me @unittest.skip("It's broken.") def test_resize_tokens_embeddings(self): super().test_resize_tokens_embeddings() def setUp(self): self.model_tester = AltCLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=AltCLIPTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def 
test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_model_outputs_equivalence(self): pass @unittest.skip(reason="Result of the model is a dict") def test_hidden_states_output(self): pass @unittest.skip(reason="AltCLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="AltCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="AltCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = AltCLIPTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class AltCLIPModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = AltCLIPTextModelTester(parent, **text_kwargs) self.vision_model_tester = AltCLIPVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return AltCLIPConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = AltCLIPModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): model(input_ids, pixel_values, attention_mask) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_torch class AltCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (AltCLIPModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": AltCLIPModel} if is_torch_available() else {} fx_compatible = True test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "FeatureExtractionPipelineTests": return True return False def setUp(self): self.model_tester = AltCLIPModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in 
individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="CLIPModel does not have input/output embeddings") def test_model_common_attributes(self): pass # override as the `logit_scale` parameter initilization is different for AltCLIP def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # CLIP needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @slow def test_model_from_pretrained(self): for model_name in ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = AltCLIPModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_vision @require_torch class AltCLIPModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "BAAI/AltCLIP" model = AltCLIPModel.from_pretrained(model_name).to(torch_device) processor = AltCLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor(text=["一张猫的照片", "一张狗的照片"], images=image, padding=True, 
return_tensors="pt").to(torch_device) # fmt: skip # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) probs = outputs.logits_per_image.softmax(dim=1) expected_probs = torch.tensor([[9.9942e-01, 5.7805e-04]], device=torch_device) self.assertTrue(torch.allclose(probs, expected_probs, atol=5e-3))
transformers/tests/models/altclip/test_modeling_altclip.py/0
{ "file_path": "transformers/tests/models/altclip/test_modeling_altclip.py", "repo_id": "transformers", "token_count": 9801 }
389
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class TFBlenderbotSmallModelTester: config_cls = BlenderbotSmallConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFBlenderbotSmallModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = 
input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] head_mask = inputs_dict["head_mask"] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_blenderbot_small_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "conversational": TFBlenderbotSmallForConditionalGeneration, "feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_onnx = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return pipeline_test_casse_name in ("TextGenerationPipelineTests", "ConversationalPipelineTests") def setUp(self): self.model_tester = TFBlenderbotSmallModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) def 
test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) @require_tokenizers @require_tf class TFBlenderbot90MIntegrationTests(unittest.TestCase): src_text = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" ] model_name = "facebook/blenderbot_small-90M" @cached_property def tokenizer(self): # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M") @cached_property def model(self): model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name) return model @slow def test_90_generation_from_long_input(self): model_inputs = self.tokenizer(self.src_text, return_tensors="tf") generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, ) generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
transformers/tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py/0
{ "file_path": "transformers/tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py", "repo_id": "transformers", "token_count": 4273 }
390
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Chinese-CLIP model. """ import inspect import os import tempfile import unittest import numpy as np import requests from transformers import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, ChineseCLIPModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) from transformers.models.chinese_clip.modeling_chinese_clip import CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ChineseCLIPProcessor class ChineseCLIPTextModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = 
ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): """ Returns a tiny configuration by default. """ return ChineseCLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ChineseCLIPTextModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = ChineseCLIPTextModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict class ChineseCLIPVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size 
self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return ChineseCLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = ChineseCLIPVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ChineseCLIPTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ChineseCLIPTextModel,) if is_torch_available() else () fx_compatible = False # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = ChineseCLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=ChineseCLIPTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def 
test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) @slow def test_model_from_pretrained(self): for model_name in CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ChineseCLIPTextModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="ChineseCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="ChineseCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @require_torch class ChineseCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as CHINESE_CLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (ChineseCLIPVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ChineseCLIPVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=ChineseCLIPVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="CHINESE_CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="ChineseCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="ChineseCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ChineseCLIPVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class ChineseCLIPModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = ChineseCLIPTextModelTester(parent, **text_kwargs) self.vision_model_tester = ChineseCLIPVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): ( config, input_ids, token_type_ids, attention_mask, _, __, ___, ) = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, token_type_ids, attention_mask, pixel_values def get_config(self): return ChineseCLIPConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, token_type_ids, attention_mask, pixel_values): model = 
ChineseCLIPModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask, token_type_ids) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_torch class ChineseCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ChineseCLIPModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": ChineseCLIPModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): text_kwargs = {"use_labels": False, "batch_size": 12} vision_kwargs = {"batch_size": 12} self.model_tester = ChineseCLIPModelTester(self, text_kwargs, vision_kwargs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="ChineseCLIPModel does not have input/output embeddings") def test_model_common_attributes(self): pass # override as the `logit_scale` parameter initilization is different for CHINESE_CLIP def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for sub_config_key in ("vision_config", "text_config"): sub_config = getattr(configs_no_init, sub_config_key, {}) setattr(configs_no_init, sub_config_key, _config_zero_init(sub_config)) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # CHINESE_CLIP needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except 
RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @slow def test_model_from_pretrained(self): for model_name in CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ChineseCLIPModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of Pikachu def prepare_img(): url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class ChineseCLIPModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "OFA-Sys/chinese-clip-vit-base-patch16" model = ChineseCLIPModel.from_pretrained(model_name).to(torch_device) processor = ChineseCLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, padding=True, return_tensors="pt" ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) probs = outputs.logits_per_image.softmax(dim=1) expected_probs = torch.tensor([[1.2686e-03, 5.4499e-02, 6.7968e-04, 9.4355e-01]], device=torch_device) self.assertTrue(torch.allclose(probs, expected_probs, atol=5e-3))
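# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): a minimal standalone
# zero-shot classification example mirroring ChineseCLIPModelIntegrationTest above.
# The checkpoint name, image URL, and candidate texts are taken from that test;
# the helper name and the plain softmax readout are assumptions added for clarity,
# not a canonical recipe.
def _example_chinese_clip_zero_shot():
    import requests
    import torch
    from PIL import Image

    from transformers import ChineseCLIPModel, ChineseCLIPProcessor

    model_name = "OFA-Sys/chinese-clip-vit-base-patch16"
    model = ChineseCLIPModel.from_pretrained(model_name).eval()
    processor = ChineseCLIPProcessor.from_pretrained(model_name)

    url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
    image = Image.open(requests.get(url, stream=True).raw)
    texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"]

    inputs = processor(text=texts, images=image, padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # logits_per_image has shape (num_images, num_texts); softmax over the text axis
    # gives per-label probabilities for the single input image
    probs = outputs.logits_per_image.softmax(dim=1)[0]
    return dict(zip(texts, probs.tolist()))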
transformers/tests/models/chinese_clip/test_modeling_chinese_clip.py/0
{ "file_path": "transformers/tests/models/chinese_clip/test_modeling_chinese_clip.py", "repo_id": "transformers", "token_count": 12666 }
391
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch DETR model. """ import inspect import math import unittest from transformers import DetrConfig, ResNetConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_timm, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DetrForObjectDetection, DetrForSegmentation, DetrModel if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class DetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, min_size=200, max_size=200, n_targets=8, num_labels=91, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.n_targets = n_targets self.num_labels = num_labels # we also set the expected seq length for both encoder and decoder self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], hidden_act="relu", num_labels=3, out_features=["stage2", "stage3", "stage4"], 
out_indices=[2, 3, 4], ) return DetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, use_timm_backbone=False, backbone_config=resnet_config, backbone=None, use_pretrained_backbone=False, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_detr_model(self, config, pixel_values, pixel_mask, labels): model = DetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size) ) def create_and_check_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = DetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class DetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DetrModel, DetrForObjectDetection, DetrForSegmentation, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": DetrModel, "image-segmentation": DetrForSegmentation, "object-detection": DetrForObjectDetection, } if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ in ["DetrForObjectDetection", "DetrForSegmentation"]: labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.min_size, self.model_tester.max_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = DetrModelTester(self) self.config_tester = ConfigTester(self, config_class=DetrConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_detr_model(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_detr_model(*config_and_inputs) def test_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_detr_object_detection_head_model(*config_and_inputs) # TODO: check if this works again for PyTorch 2.x.y @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="DETR does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @slow def test_model_outputs_equivalence(self): # TODO Niels: fix me! pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = self.model_tester.decoder_seq_length encoder_seq_length = self.model_tester.encoder_seq_length decoder_key_length = self.model_tester.decoder_seq_length encoder_key_length = self.model_tester.encoder_seq_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "DetrForObjectDetection": correct_outlen += 2 # Panoptic Segmentation model returns pred_logits, pred_boxes, pred_masks if model_class.__name__ == "DetrForSegmentation": correct_outlen += 3 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), 
self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_auxiliary_loss(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.auxiliary_loss = True # only test for object detection and segmentation model for model_class in self.all_model_classes[1:]: model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) outputs = model(**inputs) self.assertIsNotNone(outputs.auxiliary_outputs) self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" in arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, 
inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "DetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels + 1, ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(outputs) def test_greyscale_images(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # use greyscale pixel values inputs_dict["pixel_values"] = floats_tensor( [self.model_tester.batch_size, 1, self.model_tester.min_size, self.model_tester.max_size] ) # let's set num_channels to 1 config.num_channels = 1 config.backbone_config.num_channels = 1 for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.init_xavier_std = 1e9 for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "bbox_attention" in name and "bias" not in name: self.assertLess( 100000, abs(param.data.max().item()), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class DetrModelIntegrationTestsTimmBackbone(unittest.TestCase): @cached_property def default_image_processor(self): return DetrImageProcessor.from_pretrained("facebook/detr-resnet-50") if is_vision_available() else None def test_inference_no_head(self): model = DetrModel.from_pretrained("facebook/detr-resnet-50").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 100, 256)) assert outputs.last_hidden_state.shape == expected_shape expected_slice = torch.tensor( [[0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_object_detection_head(self): model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) # verify outputs expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels + 1)) 
self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slice_logits = torch.tensor( [[-19.1194, -0.0893, -11.0154], [-17.3640, -1.8035, -14.0219], [-20.0461, -0.5837, -11.1060]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = torch.tensor( [[0.4433, 0.5302, 0.8853], [0.5494, 0.2517, 0.0529], [0.4998, 0.5360, 0.9956]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) # verify postprocessing results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.9982, 0.9960, 0.9955, 0.9988, 0.9987]).to(torch_device) expected_labels = [75, 75, 63, 17, 17] expected_slice_boxes = torch.tensor([40.1633, 70.8115, 175.5471, 117.9841]).to(torch_device) self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes)) def test_inference_panoptic_segmentation_head(self): model = DetrForSegmentation.from_pretrained("facebook/detr-resnet-50-panoptic").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) # verify outputs expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels + 1)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slice_logits = torch.tensor( [[-18.1565, -1.7568, -13.5029], [-16.8888, -1.4138, -14.1028], [-17.5709, -2.5080, -11.8654]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = torch.tensor( [[0.5344, 0.1789, 0.9285], [0.4420, 0.0572, 0.0875], [0.6630, 0.6887, 0.1017]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) expected_shape_masks = torch.Size((1, model.config.num_queries, 200, 267)) self.assertEqual(outputs.pred_masks.shape, expected_shape_masks) expected_slice_masks = torch.tensor( [[-7.7558, -10.8788, -11.9797], [-11.8881, -16.4329, -17.7451], [-14.7316, -19.7383, -20.3004]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_masks[0, 0, :3, :3], expected_slice_masks, atol=1e-3)) # verify postprocessing results = image_processor.post_process_panoptic_segmentation( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_shape = torch.Size([480, 640]) expected_slice_segmentation = torch.tensor([[4, 4, 4], [4, 4, 4], [4, 4, 4]], dtype=torch.int32).to( torch_device ) expected_number_of_segments = 5 expected_first_segment = {"id": 1, "label_id": 17, "was_fused": False, "score": 0.994097} number_of_unique_segments = len(torch.unique(results["segmentation"])) self.assertTrue( number_of_unique_segments, expected_number_of_segments + 1 ) # we add 1 for the background 
class self.assertTrue(results["segmentation"].shape, expected_shape) self.assertTrue(torch.allclose(results["segmentation"][:3, :3], expected_slice_segmentation, atol=1e-4)) self.assertTrue(len(results["segments_info"]), expected_number_of_segments) self.assertDictEqual(results["segments_info"][0], expected_first_segment) @require_vision @require_torch @slow class DetrModelIntegrationTests(unittest.TestCase): @cached_property def default_image_processor(self): return ( DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm") if is_vision_available() else None ) def test_inference_no_head(self): model = DetrModel.from_pretrained("facebook/detr-resnet-50", revision="no_timm").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 100, 256)) assert outputs.last_hidden_state.shape == expected_shape expected_slice = torch.tensor( [[0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
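# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): minimal standalone object
# detection with post-processing, mirroring test_inference_object_detection_head above.
# The checkpoint name, fixture image path, and post_process_object_detection call are
# taken from the tests in this file; the helper name, the 0.9 threshold, and the use of
# config.id2label for readable labels are assumptions added here for clarity.
def _example_detr_object_detection(image_path="./tests/fixtures/tests_samples/COCO/000000039769.png"):
    import torch
    from PIL import Image

    from transformers import DetrForObjectDetection, DetrImageProcessor

    image_processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
    model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50").eval()

    image = Image.open(image_path)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # rescale boxes to the original image size and keep confident detections only
    results = image_processor.post_process_object_detection(
        outputs, threshold=0.9, target_sizes=[image.size[::-1]]
    )[0]
    return [
        (model.config.id2label[label.item()], round(score.item(), 3), [round(c, 1) for c in box.tolist()])
        for score, label, box in zip(results["scores"], results["labels"], results["boxes"])
    ]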
transformers/tests/models/detr/test_modeling_detr.py/0
{ "file_path": "transformers/tests/models/detr/test_modeling_detr.py", "repo_id": "transformers", "token_count": 12983 }
392
# coding=utf-8 # Copyright 2022 Meta Platforms authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch FLAVA model. """ import inspect import os import random import tempfile import unittest import numpy as np import requests from transformers import ( FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig, ) from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FlavaForPreTraining, FlavaImageCodebook, FlavaImageModel, FlavaModel, FlavaMultimodalModel, FlavaTextModel, ) from transformers.models.flava.modeling_flava import ( FLAVA_CODEBOOK_PRETRAINED_MODEL_ARCHIVE_LIST, FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST, ) else: FlavaModel = None FlavaForPreTraining = None torch = {} if is_vision_available(): from PIL import Image from transformers import FlavaProcessor class FlavaImageModelTester: def __init__( self, parent, batch_size=12, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=30, patch_size=2, num_channels=3, qkv_bias=True, mask_token=True, vocab_size=99, ): self.parent = parent self.batch_size = batch_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.mask_token = mask_token self.vocab_size = vocab_size def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) num_patches = self.image_size // self.patch_size bool_masked_pos = ( torch.rand((self.batch_size, num_patches, num_patches), device=pixel_values.device) < 0.9 ).long() config = self.get_config() return config, pixel_values, bool_masked_pos def get_config(self): return FlavaImageConfig( hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, 
initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, qkv_bias=self.qkv_bias, mask_token=self.mask_token, vocab_size=self.vocab_size, ) def create_and_check_model(self, config, pixel_values, bool_masked_pos): model = FlavaImageModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values, bool_masked_pos) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, bool_masked_pos = config_and_inputs inputs_dict = {"pixel_values": pixel_values, "bool_masked_pos": bool_masked_pos} return config, inputs_dict @require_torch class FlavaImageModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as FLAVA does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (FlavaImageModel,) if is_torch_available() else () test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = FlavaImageModelTester(self) self.config_tester = ConfigTester(self, config_class=FlavaImageConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_inputs_embeds(self): # FLAVA does not use inputs_embeds pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # in FLAVA, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = 
model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # FLAVA has a different seq_length image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # skip this test as FlavaImageModel has no base class and is # not available in MODEL_MAPPING def test_save_load_fast_init_from_base(self): pass # skip this test as FlavaImageModel has no base class and is # not available in MODEL_MAPPING def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FlavaImageModel.from_pretrained(model_name) self.assertIsNotNone(model) class 
FlavaTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, vocab_size=102, type_vocab_size=2, max_position_embeddings=512, position_embedding_type="absolute", hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, qkv_bias=True, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.seq_length = seq_length self.vocab_size = vocab_size self.type_vocab_size = type_vocab_size self.max_position_embeddings = max_position_embeddings self.position_embedding_type = position_embedding_type self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.pad_token_id = pad_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = self.get_config() return config, input_ids, token_type_ids, input_mask def get_config(self): return FlavaTextConfig( vocab_size=self.vocab_size, type_vocab_size=self.type_vocab_size, max_position_embeddings=self.max_position_embeddings, position_embedding_type=self.position_embedding_type, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, pad_token_id=self.pad_token_id, qkv_bias=self.qkv_bias, ) def create_and_check_model(self, config, input_ids, token_type_ids, input_mask): model = FlavaTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FlavaTextModelTest(ModelTesterMixin, unittest.TestCase): 
all_model_classes = (FlavaTextModel,) if is_torch_available() else () test_pruning = False test_head_masking = False test_torchscript = False def setUp(self): self.model_tester = FlavaTextModelTester(self) self.config_tester = ConfigTester(self, config_class=FlavaTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_inputs_embeds(self): # FLAVA does not use inputs_embeds pass # skip this test as FlavaTextModel has no base class and is # not available in MODEL_MAPPING def test_save_load_fast_init_from_base(self): pass # skip this test as FlavaTextModel has no base class and is # not available in MODEL_MAPPING def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FlavaTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class FlavaMultimodalModelTester: def __init__( self, parent, batch_size=12, seq_length=44, use_input_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, qkv_bias=True, ce_ignore_index=-100, use_cls_token=True, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.use_input_mask = use_input_mask self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.ce_ignore_index = ce_ignore_index self.use_cls_token = use_cls_token def prepare_config_and_inputs(self): hidden_states = floats_tensor([self.batch_size, self.seq_length - 1, self.hidden_size]) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, hidden_states, input_mask def get_config(self): return FlavaMultimodalConfig( hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, 
layer_norm_eps=self.layer_norm_eps, qkv_bias=self.qkv_bias, use_cls_token=self.use_cls_token, ce_ignore_index=self.ce_ignore_index, ) def create_and_check_model(self, config, hidden_states, input_mask): model = FlavaMultimodalModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(hidden_states, attention_mask=input_mask) result = model(hidden_states) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, hidden_states, input_mask = config_and_inputs inputs_dict = {"hidden_states": hidden_states, "attention_mask": input_mask} return config, inputs_dict @require_torch class FlavaMultimodalModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (FlavaMultimodalModel,) if is_torch_available() else () test_pruning = False test_head_masking = False test_resize_embeddings = False test_torchscript = False def setUp(self): self.model_tester = FlavaMultimodalModelTester(self) self.config_tester = ConfigTester( self, config_class=FlavaMultimodalConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["hidden_states"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model_common_attributes(self): # No embedding in multimodal model pass def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_inputs_embeds(self): # FLAVA does not use inputs_embeds pass # skip this test as FlavaMultimodalModel has no base class and is # not available in MODEL_MAPPING def test_save_load_fast_init_from_base(self): pass # skip this test as FlavaMultimodalModel has no base class and is # not available in MODEL_MAPPING def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FlavaMultimodalModel.from_pretrained(model_name) self.assertIsNotNone(model) class FlavaImageCodebookTester: def __init__( self, parent, batch_size=12, image_size=112, num_channels=3, hidden_size=32, num_groups=2, vocab_size=99, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.hidden_size = hidden_size self.num_groups = num_groups self.vocab_size = vocab_size def prepare_config_and_inputs(self): pixel_values = 
floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return FlavaImageCodebookConfig( hidden_size=self.hidden_size, num_groups=self.num_groups, vocab_size=self.vocab_size ) def create_and_check_model(self, config, pixel_values): model = FlavaImageCodebook(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual( result.shape, (self.batch_size, config.vocab_size, self.image_size // 8, self.image_size // 8) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class FlavaImageCodebookTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (FlavaImageCodebook,) if is_torch_available() else () test_pruning = False test_head_masking = False test_resize_embeddings = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = FlavaImageCodebookTester(self) self.config_tester = ConfigTester(self, config_class=FlavaImageCodebookConfig, has_text_modality=False) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) @unittest.skip(reason="Flava does not output attentions") def test_attention_outputs(self): pass def test_model_common_attributes(self): # No embedding in multimodal model pass def test_training(self): pass def test_hidden_states_output(self): pass def test_retain_grad_hidden_states_attentions(self): # no attentions pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_inputs_embeds(self): # FLAVA does not use inputs_embeds pass def test_model_outputs_equivalence(self): pass # skip this test as FlavaImageCodebook has no base class and is # not available in MODEL_MAPPING def test_save_load_fast_init_from_base(self): pass # skip this test as FlavaImageCodebook has no base class and is # not available in MODEL_MAPPING def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in FLAVA_CODEBOOK_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FlavaImageCodebook.from_pretrained(model_name) self.assertIsNotNone(model) class FlavaModelTester: model_class = FlavaModel def __init__( self, parent, text_kwargs=None, image_kwargs=None, multimodal_kwargs=None, image_codebook_kwargs=None, is_training=True, hidden_size=32, projection_dim=32, initializer_range=0.02, layer_norm_eps=1e-12, ): 
if text_kwargs is None: text_kwargs = {} if image_kwargs is None: image_kwargs = {} if multimodal_kwargs is None: multimodal_kwargs = {} if image_codebook_kwargs is None: image_codebook_kwargs = {} self.parent = parent self.image_model_tester = FlavaImageModelTester(parent, **image_kwargs) self.text_model_tester = FlavaTextModelTester(parent, **text_kwargs) self.multimodal_model_tester = FlavaMultimodalModelTester(parent, **multimodal_kwargs) self.image_codebook_tester = FlavaImageCodebookTester(parent, **image_codebook_kwargs) self.is_training = is_training self.config_tester = ConfigTester(self, config_class=FlavaConfig, hidden_size=37) self.hidden_size = hidden_size self.projection_dim = projection_dim self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps def test_config(self): self.config_tester.run_common_tests() def prepare_config_and_inputs_for_common(self): _, pixel_values, bool_masked_pos = self.image_model_tester.prepare_config_and_inputs() _, input_ids, token_type_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() return config, { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "bool_masked_pos": bool_masked_pos, } def get_config(self): return FlavaConfig.from_configs( self.image_model_tester.get_config(), self.text_model_tester.get_config(), self.multimodal_model_tester.get_config(), self.image_codebook_tester.get_config(), hidden_size=self.hidden_size, projection_dim=self.projection_dim, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, ) def create_and_check_model(self, config, inputs): self._test_model(config, inputs, test_image=True) self._test_model(config, inputs, test_text=True) self._test_model(config, inputs, test_image=True, test_text=True) def _test_model(self, config, inputs, test_image=False, test_text=False): model = self.model_class(config).to(torch_device).eval() with torch.no_grad(): result = model( input_ids=inputs["input_ids"] if test_text else None, attention_mask=inputs["attention_mask"] if test_text else None, token_type_ids=inputs["token_type_ids"] if test_text else None, pixel_values=inputs["pixel_values"] if test_image else None, bool_masked_pos=inputs["bool_masked_pos"] if test_image else None, ) image_size = (self.image_model_tester.image_size, self.image_model_tester.image_size) patch_size = (self.image_model_tester.patch_size, self.image_model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) if test_image: self.parent.assertEqual( result.image_embeddings.shape, (self.image_model_tester.batch_size, num_patches + 1, self.image_model_tester.hidden_size), ) else: self.parent.assertIsNone(result.image_embeddings) if test_text: self.parent.assertEqual( result.text_embeddings.shape, ( self.text_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size, ), ) else: self.parent.assertIsNone(result.text_embeddings) if test_image and test_text: self.parent.assertEqual( result.multimodal_embeddings.shape, ( self.multimodal_model_tester.batch_size, self.text_model_tester.seq_length + num_patches + 2, self.multimodal_model_tester.hidden_size, ), ) else: self.parent.assertIsNone(result.multimodal_embeddings) @require_torch class FlavaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FlavaModel,) if is_torch_available() else () pipeline_model_mapping = 
{"feature-extraction": FlavaModel} if is_torch_available() else {} class_for_tester = FlavaModelTester test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = self.class_for_tester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_model(*config_and_inputs) # hidden_states are tested in individual model tests def test_hidden_states_output(self): pass # input_embeds are tested in individual model tests def test_inputs_embeds(self): pass # tested in individual model tests def test_retain_grad_hidden_states_attentions(self): pass # FlavaModel does not have input/output embeddings def test_model_common_attributes(self): pass # override as the `logit_scale` parameter initilization is different for FLAVA def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale" or name == "flava.logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False configs_no_init.return_loss = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # FLAVA needs pixel_values if "input_ids_masked" in inputs_dict: # For pretraining inputs = (input_ids, inputs_dict["input_ids_masked"], pixel_values) else: inputs = (input_ids, pixel_values) traced_model = torch.jit.trace(model, inputs) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() # Non persistent buffers won't be in original state dict loaded_model_state_dict.pop("text_model.embeddings.token_type_ids", None) non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False 
for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_image_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save FlavaConfig and check if we can load FlavaImageConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) image_config = FlavaImageConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.image_config.to_dict(), image_config.to_dict()) # Save FlavaConfig and check if we can load FlavaTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = FlavaTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) # Save FlavaConfig and check if we can load FlavaMultimodalConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) multimodal_config = FlavaMultimodalConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.multimodal_config.to_dict(), multimodal_config.to_dict()) # overwrite from common since FlavaModel/TFFlavaModel return FLAVAOutput/TFFLAVAOutput @slow def test_model_from_pretrained(self): for model_name in FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FlavaModel.from_pretrained(model_name) self.assertIsNotNone(model) class FlavaForPreTrainingTester(FlavaModelTester): model_class = FlavaForPreTraining def prepare_config_and_inputs_for_common(self): _, pixel_values, bool_masked_pos = self.image_model_tester.prepare_config_and_inputs() _, input_ids, token_type_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() input_ids_masked = input_ids.detach().clone() input_ids_masked[:, 1:3] = 100 mlm_labels = input_ids.detach().clone() mlm_labels[:, :] = config.ce_ignore_index mlm_labels[:, 1:3] = input_ids[:, 1:3] mim_labels = torch.randint( 0, self.image_model_tester.vocab_size, bool_masked_pos.size(), device=bool_masked_pos.device ).long() mim_labels[bool_masked_pos.ne(True)] = config.ce_ignore_index itm_labels = torch.ones(mlm_labels.size(0), device=bool_masked_pos.device).long() return config, { "input_ids": input_ids, "input_ids_masked": input_ids_masked, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "bool_masked_pos": bool_masked_pos, "mlm_labels": mlm_labels, "mim_labels": mim_labels, "itm_labels": itm_labels, "return_loss": True, } def _test_model(self, config, inputs, test_image=False, test_text=False): model = self.model_class(config).to(torch_device).eval() with torch.no_grad(): result = model( input_ids=inputs["input_ids"] if test_text else None, input_ids_masked=inputs["input_ids_masked"] if test_text else None, attention_mask=inputs["attention_mask"] if test_text else None, token_type_ids=inputs["token_type_ids"] if test_text else None, pixel_values=inputs["pixel_values"] if test_image else None, bool_masked_pos=inputs["bool_masked_pos"] if test_image else None, mlm_labels=inputs["mlm_labels"], mim_labels=inputs["mim_labels"], itm_labels=inputs["itm_labels"], return_loss=inputs["return_loss"], ) image_size = (self.image_model_tester.image_size, 
self.image_model_tester.image_size) patch_size = (self.image_model_tester.patch_size, self.image_model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) if test_image: self.parent.assertEqual( result.image_embeddings.shape, (self.image_model_tester.batch_size, num_patches + 1, self.image_model_tester.hidden_size), ) if not test_text: self.parent.assertEqual( result.loss_info.mim.dim(), 0, ) self.parent.assertEqual( result.mim_logits.shape, (inputs["bool_masked_pos"].sum().item(), self.image_model_tester.vocab_size), ) else: self.parent.assertIsNone(result.image_embeddings) if test_text: self.parent.assertEqual( result.text_embeddings.shape, ( self.text_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size, ), ) if not test_image: self.parent.assertEqual(result.loss_info.mlm.dim(), 0) self.parent.assertEqual( result.mlm_logits.shape, ( (inputs["mlm_labels"] != self.multimodal_model_tester.ce_ignore_index).sum().item(), self.text_model_tester.vocab_size, ), ) else: self.parent.assertIsNone(result.text_embeddings) if test_image and test_text: self.parent.assertEqual( result.multimodal_masked_embeddings.shape, ( self.multimodal_model_tester.batch_size, self.text_model_tester.seq_length + num_patches + 2, self.multimodal_model_tester.hidden_size, ), ) self.parent.assertEqual( result.itm_logits.shape, (self.text_model_tester.batch_size, 2), ) self.parent.assertEqual( result.mmm_text_logits.shape, ( (inputs["mlm_labels"] != self.multimodal_model_tester.ce_ignore_index).sum().item(), self.text_model_tester.vocab_size, ), ) self.parent.assertEqual( result.mmm_image_logits.shape, (inputs["bool_masked_pos"].sum().item(), self.image_model_tester.vocab_size), ) self.parent.assertEqual( result.contrastive_logits_per_image.shape, (self.image_model_tester.batch_size, self.text_model_tester.batch_size), ) self.parent.assertEqual( result.contrastive_logits_per_text.shape, (self.text_model_tester.batch_size, self.image_model_tester.batch_size), ) for item in [ result.loss_info.global_contrastive, result.loss_info.itm, result.loss_info.mmm_text, result.loss_info.mmm_image, ]: self.parent.assertEqual(item.dim(), 0) for item in [result.loss_info.mim, result.loss_info.mlm]: self.parent.assertIsNone(item) else: self.parent.assertIsNone(result.multimodal_masked_embeddings) for item in [ result.loss_info.global_contrastive, result.loss_info.itm, result.loss_info.mmm_text, result.loss_info.mmm_image, ]: self.parent.assertIsNone(item) self.parent.assertIsNone(result.multimodal_embeddings) @require_torch class FlavaForPreTrainingTest(FlavaModelTest): all_model_classes = (FlavaForPreTraining,) if is_torch_available() else () class_for_tester = FlavaForPreTrainingTester test_torchscript = False @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # We will verify our results on an image of cute cats def prepare_img(): url 
= "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class FlavaModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "facebook/flava-full" model = FlavaModel.from_pretrained(model_name).to(torch_device) processor = FlavaProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=[image, image], padding="max_length", max_length=77, return_tensors="pt", ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs, return_dict=True) # verify the embeddings self.assertAlmostEqual(outputs.image_embeddings.sum().item(), -1352.53540, places=4) self.assertAlmostEqual(outputs.text_embeddings.sum().item(), -198.98225, places=4) self.assertAlmostEqual(outputs.multimodal_embeddings.sum().item(), -3988.51367, places=4) @require_vision @require_torch class FlavaForPreTrainingIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "facebook/flava-full" model = FlavaForPreTraining.from_pretrained(model_name).to(torch_device) processor = FlavaProcessor.from_pretrained(model_name) torch.manual_seed(1) random.seed(1) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=[image, image], padding="max_length", max_length=77, return_tensors="pt", return_codebook_pixels=True, return_image_mask=True, ) # Create a clone of the input_ids tensor that will be its masked version inputs["input_ids_masked"] = inputs["input_ids"].clone() # Mask the tokens "a" & "cat" from the "a photo of a cat" text using the special 103 value inputs["input_ids_masked"][0, 4:6] = 103 # MLM labels. It is a cloned version of input_ids where all values are -100 (i.e., ignored) # except those that are masked, whose original values are stored inputs["mlm_labels"] = inputs["input_ids"].clone() inputs["mlm_labels"][:, :] = -100 inputs["mlm_labels"][0, 4:6] = inputs["input_ids"][0, 4:6] inputs = inputs.to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.contrastive_logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.contrastive_logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[16.1291, 8.4033], [16.1291, 8.4033]], device=torch_device) self.assertTrue(torch.allclose(outputs.contrastive_logits_per_image, expected_logits, atol=1e-3)) self.assertAlmostEqual(outputs.loss_info.mmm_text.item(), 1.75533199, places=4) self.assertAlmostEqual(outputs.loss_info.mmm_image.item(), 7.0290069, places=4) self.assertAlmostEqual(outputs.loss.item(), 11.0626, places=4) @slow def test_inference_with_itm_labels(self): model_name = "facebook/flava-full" model = FlavaForPreTraining.from_pretrained(model_name).to(torch_device) processor = FlavaProcessor.from_pretrained(model_name) torch.manual_seed(1) random.seed(1) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=[image, image], padding="max_length", max_length=77, return_tensors="pt", return_codebook_pixels=True, return_image_mask=True, ) # Create a clone of the input_ids tensor that will be its masked version inputs["input_ids_masked"] = inputs["input_ids"].clone() # Mask the tokens "a" & "cat" from the "a photo of a cat" text using the special 103 value 
inputs["input_ids_masked"][0, 4:6] = 103 # MLM labels. It is a cloned version of input_ids where all values are -100 (i.e., ignored) # except those that are masked, whose original values are stored inputs["mlm_labels"] = inputs["input_ids"].clone() inputs["mlm_labels"][:, :] = -100 inputs["mlm_labels"][0, 4:6] = inputs["input_ids"][0, 4:6] # Manually create the itm_labels tensor that indicates if the image-text match. # In this case, the firs pair matches and the second does not inputs["itm_labels"] = torch.tensor([1, 0]) inputs = inputs.to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.contrastive_logits_per_image.shape, torch.Size((torch.count_nonzero(inputs["itm_labels"]).item(), inputs.input_ids.shape[0])), ) self.assertEqual( outputs.contrastive_logits_per_text.shape, torch.Size((torch.count_nonzero(inputs["itm_labels"]).item(), inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[16.1291, 8.4033], [16.1291, 8.4033]], device=torch_device) self.assertTrue(torch.allclose(outputs.contrastive_logits_per_image, expected_logits, atol=1e-3)) self.assertAlmostEqual(outputs.loss_info.mmm_text.item(), 1.75533199, places=4) self.assertAlmostEqual(outputs.loss_info.mmm_image.item(), 6.89590501, places=4) self.assertAlmostEqual(outputs.loss.item(), 9.1995, places=4)
transformers/tests/models/flava/test_modeling_flava.py/0
{ "file_path": "transformers/tests/models/flava/test_modeling_flava.py", "repo_id": "transformers", "token_count": 25176 }
393
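The FLAVA testers above repeatedly assert the same embedding shapes. The helper below is a minimal sketch of how those expected shapes follow from the tester attributes; it is hypothetical (not part of the test file) and, for brevity, assumes a square image and a single shared hidden size across the image, text and multimodal towers.

def expected_flava_shapes(batch_size, image_size, patch_size, seq_length, hidden_size):
    # Number of image patches, as computed in _test_model: (H // p) * (W // p)
    num_patches = (image_size // patch_size) * (image_size // patch_size)
    # Image embeddings carry one extra CLS position; the multimodal sequence
    # concatenates text and image tokens plus two extra positions, matching
    # the `seq_length + num_patches + 2` asserted in the tests above.
    image_shape = (batch_size, num_patches + 1, hidden_size)
    text_shape = (batch_size, seq_length, hidden_size)
    multimodal_shape = (batch_size, seq_length + num_patches + 2, hidden_size)
    return image_shape, text_shape, multimodal_shape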
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Fuyu model. """ import io import unittest import requests from transformers import FuyuConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from transformers.utils import cached_property from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_vision_available(): from PIL import Image if is_torch_available() and is_vision_available(): from transformers import FuyuProcessor if is_torch_available(): import torch from transformers import FuyuForCausalLM class FuyuModelTester: def __init__( self, parent, batch_size=13, seq_length=7, image_size=30, patch_size=15, num_channels=3, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels def get_config(self): return FuyuConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, 
hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, input_mask, sequence_labels, token_labels, ): model = FuyuForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, input_mask, sequence_labels, token_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = FuyuForCausalLM(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, encoder_hidden_states, encoder_attention_mask, ): model = FuyuForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, input_mask, sequence_labels, token_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = FuyuForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, 
atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FuyuModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FuyuForCausalLM,) if is_torch_available() else () pipeline_model_mapping = {"text-generation": FuyuForCausalLM} if is_torch_available() else {} test_head_masking = False test_pruning = False test_cpu_offload = False test_disk_offload = False test_model_parallel = False def setUp(self): self.model_tester = FuyuModelTester(self) @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # TODO: Fix me (once this model gets more usage) @unittest.skip("Does not work on the tiny model.") def test_disk_offload_bin(self): super().test_disk_offload() # TODO: Fix me (once this model gets more usage) @unittest.skip("Does not work on the tiny model.") def test_disk_offload_safetensors(self): super().test_disk_offload() # TODO: Fix me (once this model gets more usage) @unittest.skip("Does not work on the tiny model.") def test_model_parallelism(self): super().test_model_parallelism() @slow @require_torch_gpu class FuyuModelIntegrationTest(unittest.TestCase): @cached_property def default_processor(self): return FuyuProcessor.from_pretrained("adept/fuyu-8b") @cached_property def default_model(self): return FuyuForCausalLM.from_pretrained("adept/fuyu-8b") def test_greedy_generation(self): processor = self.default_processor model = self.default_model url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png" image = Image.open(io.BytesIO(requests.get(url).content)) text_prompt_coco_captioning = "Generate a coco-style caption.\n" inputs = processor(text=text_prompt_coco_captioning, images=image, return_tensors="pt") generated_ids = model.generate(**inputs, max_new_tokens=10) # take the last 8 tokens (in order to skip special \n\x04 characters) and decode them generated_text = processor.batch_decode(generated_ids[:, -8:], skip_special_tokens=True)[0] self.assertEqual(generated_text, "A blue bus parked on the side of a road.") """ @slow @require_torch_accelerator def test_model_8b_chat_greedy_generation_bus_color(self): EXPECTED_TEXT_COMPLETION = "The bus is blue.\n|ENDOFTEXT|" text_prompt_bus_color = "What color is the bus?\n" model_inputs_bus_color = self.processor(text=text_prompt_bus_color, images=self.bus_image_pil) generated_tokens = self.model.generate(**model_inputs_bus_color, max_new_tokens=10) text = self.processor.tokenizer.batch_decode(generated_tokens) end_sequence = text[0].split("\x04")[1] clean_sequence = ( end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")] if "|ENDOFTEXT|" in end_sequence else end_sequence ) 
self.assertEqual(EXPECTED_TEXT_COMPLETION, clean_sequence) @slow @require_torch_accelerator def test_model_8b_chat_greedy_generation_chart_vqa(self): EXPECTED_TEXT_TOKENS = ["The","life expectancy","at","birth","of male","s in","","20","18","is","","80",".","7",".","\n","|ENDOFTEXT|",] # fmt: skip expected_text_completion = " ".join(EXPECTED_TEXT_TOKENS) # TODO make sure the end string matches text_prompt_chart_vqa = "What is the highest life expectancy at birth of male?\n" chart_image_url = ( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/chart.png" ) chart_image_pil = Image.open(io.BytesIO(requests.get(chart_image_url).content)) model_inputs_chart_vqa = self.processor(text=text_prompt_chart_vqa, images=chart_image_pil) generated_tokens = self.model.generate(**model_inputs_chart_vqa, max_new_tokens=10) text = self.processor.tokenizer.batch_decode(generated_tokens) end_sequence = text[0].split("\x04")[1] clean_sequence = ( end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")] if "|ENDOFTEXT|" in end_sequence else end_sequence ) self.assertEqual(expected_text_completion, clean_sequence) @slow @require_torch_accelerator def test_model_8b_chat_greedy_generation_bounding_box(self): EXPECTED_TEXT_COMPLETION = "\x00194213202244\x01|ENDOFTEXT|" text_prompt_bbox = "When presented with a box, perform OCR to extract text contained within it. If provided with text, generate the corresponding bounding box.\\nWilliams" # noqa: E231 bbox_image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bbox_sample_image.png" bbox_image_pil = Image.open(io.BytesIO(requests.get(bbox_image_url).content)) model_inputs_bbox = self.processor(text=text_prompt_bbox, images=bbox_image_pil) generated_tokens = self.model.generate(**model_inputs_bbox, max_new_tokens=10) text = self.processor.tokenizer.batch_decode(generated_tokens) end_sequence = text[0].split("\x04")[1] clean_sequence = ( end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")] if "|ENDOFTEXT|" in end_sequence else end_sequence ) self.assertEqual(EXPECTED_TEXT_COMPLETION, clean_sequence) """
transformers/tests/models/fuyu/test_modeling_fuyu.py/0
{ "file_path": "transformers/tests/models/fuyu/test_modeling_fuyu.py", "repo_id": "transformers", "token_count": 6740 }
394
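As a standalone illustration of the flow exercised in `test_greedy_generation` above, the sketch below mirrors the test almost line for line; it assumes the `adept/fuyu-8b` checkpoint and the bus image URL used in the test are reachable, and it is not meant as the canonical inference recipe.

import io

import requests
from PIL import Image

from transformers import FuyuForCausalLM, FuyuProcessor

processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b")

url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
image = Image.open(io.BytesIO(requests.get(url).content))

inputs = processor(text="Generate a coco-style caption.\n", images=image, return_tensors="pt")
generated_ids = model.generate(**inputs, max_new_tokens=10)

# As in the test, keep only the last 8 tokens to skip the prompt and the special \n\x04 separator
caption = processor.batch_decode(generated_ids[:, -8:], skip_special_tokens=True)[0]
print(caption)  # the test expects "A blue bus parked on the side of a road."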
# coding=utf-8 # Copyright 2023 Toshiyuki Sakamoto(tanreinama) and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_jinja, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = GPTSanJapaneseTokenizer test_rust_tokenizer = False from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False} def setUp(self): super().setUp() vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"] # fmt: skip emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀 self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) with open(self.emoji_file, "w") as emoji_writer: emoji_writer.write(json.dumps(emoji_tokens)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs) # Copied from tests.models.gpt_neox_japanese.test_tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizationTest.get_input_output_texts def get_input_output_texts(self, tokenizer): input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀" output_text = "こんにちは、世界。 \nこんばんは、世界。😀" return input_text, output_text # Copied from tests.models.gpt_neox_japanese.test_tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizationTest.get_clean_sequence def get_clean_sequence(self, tokenizer): input_text, output_text = self.get_input_output_texts(tokenizer) ids = tokenizer.encode(output_text, add_special_tokens=False) text = tokenizer.decode(ids, clean_up_tokenization_spaces=False) return text, ids # Copied from tests.models.gpt_neox_japanese.test_tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizationTest.test_pretokenized_inputs def test_pretokenized_inputs(self): pass # TODO add if relevant # Copied from tests.models.gpt_neox_japanese.test_tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizationTest.test_maximum_encoding_length_pair_input def test_maximum_encoding_length_pair_input(self): pass # TODO add if relevant # Copied from tests.models.gpt_neox_japanese.test_tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizationTest.test_maximum_encoding_length_single_input def test_maximum_encoding_length_single_input(self): pass # TODO add if relevant # Copied from 
tests.models.gpt_neox_japanese.test_tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizationTest.test_full_tokenizer def test_full_tokenizer(self): tokenizer = self.get_tokenizer() # Testing tokenization input_text = "こんにちは、世界。 こんばんは、㔺界。" expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"] tokens = tokenizer.tokenize(input_text) self.assertListEqual(tokens, expected_token) # Testing conversion to ids without special tokens expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] input_ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(input_ids, expected_ids) # Testing conversion to ids with special tokens input_tokens = tokens + [tokenizer.unk_token] expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] input_ids = tokenizer.convert_tokens_to_ids(input_tokens) self.assertListEqual(input_ids, expected_ids) def test_token_bagging(self): tokenizer = self.get_tokenizer() # Testing tokenization input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。" expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。" tokens = tokenizer.encode(input_text) output_text = tokenizer.decode(tokens) self.assertEqual(output_text, expected_text) @slow def test_prefix_input(self): tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese") # Testing tokenization prefix_text = "こんにちは、世界。" input_text = "こんばんは、㔺界。😀" expected_text = "こんにちは、世界。こんばんは、世界。😀" tokens_1 = tokenizer.encode(prefix_text + input_text) tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text) tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text) output_text_1 = tokenizer.decode(tokens_1) output_text_2 = tokenizer.decode(tokens_2) output_text_3 = tokenizer.decode(tokens_3) self.assertEqual(output_text_1, expected_text) self.assertEqual(output_text_2, expected_text) self.assertEqual(output_text_3, expected_text) @slow def test_token_type_ids(self): tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese") # Testing tokenization prefix_text = "こんにちは、世界。" input_text = "こんばんは、㔺界。😀" len_prefix = len(tokenizer.encode(prefix_text)) - 2 len_text = len(tokenizer.encode(input_text)) - 2 expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1) expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0] expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1) type_id_1 = tokenizer(prefix_text + input_text).token_type_ids type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids self.assertListEqual(type_id_1, expected_mask_1) self.assertListEqual(type_id_2, expected_mask_2) self.assertListEqual(type_id_3, expected_mask_3) @slow def test_prefix_tokens(self): tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese") x_token_1 = tokenizer.encode("あンいワ") x_token_2 = tokenizer.encode("", prefix_text="あンいワ") x_token_3 = tokenizer.encode("いワ", prefix_text="あン") self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2)) self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3)) self.assertNotEqual(x_token_1, x_token_2) self.assertNotEqual(x_token_1, x_token_3) self.assertEqual(x_token_1[1], x_token_2[-1]) # SEG token self.assertEqual(x_token_1[1], x_token_3[3]) # SEG token @slow def test_batch_encode(self): tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese") input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]] x_token = tokenizer(input_pairs, padding=True) x_token_2 = tokenizer.batch_encode_plus(input_pairs, 
padding=True) # fmt: off expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]] expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids, expected_outputs) self.assertListEqual(x_token.token_type_ids, expected_typeids) self.assertListEqual(x_token.attention_mask, expected_attmask) self.assertListEqual(x_token_2.input_ids, expected_outputs) self.assertListEqual(x_token_2.token_type_ids, expected_typeids) self.assertListEqual(x_token_2.attention_mask, expected_attmask) # Copied from tests.models.gpt_neox_japanese.test_tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizationTest.test_conversion_reversible def test_conversion_reversible(self): # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass # Copied from tests.models.gpt_neox_japanese.test_tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizationTest.test_padding_different_model_input_name def test_padding_different_model_input_name(self): # tokenizer has no padding token pass @require_jinja def test_tokenization_for_chat(self): tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese") # This is in English, but it's just here to make sure the chat control tokens are being added properly test_chats = [ [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}], [ {"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Nice to meet you."}, ], [{"role": "assistant", "content": "Nice to meet you."}, {"role": "user", "content": "Hello!"}], ] tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats] # fmt: off expected_tokens = [ [35993, 35998, 35637, 35659, 35665, 35716, 35645, 35662, 35649, 35716, 35645, 35716, 35652, 35649, 35656, 35660, 35650, 35665, 35656, 35716, 35647, 35652, 35645, 35664, 35646, 35659, 35664, 35595, 35716, 35999, 35993, 35998, 35620, 35649, 35656, 35656, 35659, 35582, 35716, 35999], [35993, 35998, 35637, 35659, 35665, 35716, 35645, 35662, 35649, 35716, 35645, 35716, 35652, 35649, 35656, 35660, 35650, 35665, 35656, 35716, 35647, 35652, 35645, 35664, 35646, 35659, 35664, 35595, 35716, 35999, 35993, 35998, 35620, 35649, 35656, 35656, 35659, 35582, 35716, 35999, 35993, 35998, 35626, 35653, 35647, 35649, 35716, 35664, 35659, 35716, 35657, 35649, 35649, 35664, 35716, 35669, 35659, 35665, 35595, 35716, 35999], [35993, 35998, 35626, 35653, 35647, 35649, 35716, 35664, 35659, 35716, 35657, 35649, 35649, 35664, 35716, 35669, 35659, 35665, 35595, 35716, 35999, 35993, 35998, 35620, 35649, 35656, 35656, 35659, 35582, 35716, 35999] ] # fmt: on for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens): self.assertListEqual(tokenized_chat, expected_tokens)
transformers/tests/models/gptsan_japanese/test_tokenization_gptsan_japanese.py/0
{ "file_path": "transformers/tests/models/gptsan_japanese/test_tokenization_gptsan_japanese.py", "repo_id": "transformers", "token_count": 5044 }
395
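The prefix-handling tests above (`test_prefix_input`, `test_token_type_ids`) cover the main GPTSAN-specific behaviour; a small usage sketch follows, assuming the `Tanrei/GPTSAN-japanese` checkpoint is available. The masks are only described qualitatively here; the exact values are pinned down in the tests.

from transformers import GPTSanJapaneseTokenizer

tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")

prefix_text = "こんにちは、世界。"
input_text = "こんばんは、㔺界。😀"

# Passing prefix_text splits the sequence into a prefix part and a target part
encoded = tokenizer(input_text, prefix_text=prefix_text)

# token_type_ids mark the prefix positions (including the start token) with 1
# and the remaining positions with 0, as checked against expected_mask_3 above
print(encoded.input_ids)
print(encoded.token_type_ids)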
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from transformers.testing_utils import TestCasePlus, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, IdeficsImageProcessor, IdeficsProcessor, LlamaTokenizerFast, PreTrainedTokenizerFast, ) @require_torch @require_vision class IdeficsProcessorTest(TestCasePlus): def setUp(self): super().setUp() self.checkpoint_path = self.get_auto_remove_tmp_dir() image_processor = IdeficsImageProcessor() tokenizer = LlamaTokenizerFast.from_pretrained("HuggingFaceM4/tiny-random-idefics") processor = IdeficsProcessor(image_processor, tokenizer) processor.save_pretrained(self.checkpoint_path) self.input_keys = ["pixel_values", "input_ids", "attention_mask", "image_attention_mask"] def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.checkpoint_path, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.checkpoint_path, **kwargs).image_processor def prepare_prompts(self): """This function prepares a list of PIL images""" num_images = 2 images = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8) for x in range(num_images)] images = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in images] # print([type(x) for x in images]) # die prompts = [ # text and 1 image [ "User:", images[0], "Describe this image.\nAssistant:", ], # text and images [ "User:", images[0], "Describe this image.\nAssistant: An image of two dogs.\n", "User:", images[1], "Describe this image.\nAssistant:", ], # only text [ "User:", "Describe this image.\nAssistant: An image of two kittens.\n", "User:", "Describe this image.\nAssistant:", ], # only images [ images[0], images[1], ], ] return prompts def test_save_load_pretrained_additional_features(self): processor = IdeficsProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.checkpoint_path) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = IdeficsProcessor.from_pretrained( self.checkpoint_path, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, IdeficsImageProcessor) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor) prompts = self.prepare_prompts() # test that all prompts 
succeeded input_processor = processor(prompts, return_tensors="pt") for key in self.input_keys: assert torch.is_tensor(input_processor[key]) def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_tokenizer_padding(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer(padding_side="right") processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_tokens = [ "<s> Describe this image.\nAssistant:<unk><unk><unk><unk><unk><unk><unk><unk><unk>", "<s> Describe this image.\nAssistant:<unk><unk><unk><unk><unk><unk><unk><unk><unk><unk>", ] prompts = [[prompt] for prompt in self.prepare_prompts()[2]] max_length = processor(prompts, padding="max_length", truncation=True, max_length=20) longest = processor(prompts, padding="longest", truncation=True, max_length=30) decoded_max_length = processor.tokenizer.decode(max_length["input_ids"][-1]) decoded_longest = processor.tokenizer.decode(longest["input_ids"][-1]) self.assertEqual(decoded_max_length, predicted_tokens[1]) self.assertEqual(decoded_longest, predicted_tokens[0]) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor) prompts = self.prepare_prompts() inputs = processor(prompts) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertSetEqual(set(inputs.keys()), set(self.input_keys))
transformers/tests/models/idefics/test_processor_idefics.py/0
{ "file_path": "transformers/tests/models/idefics/test_processor_idefics.py", "repo_id": "transformers", "token_count": 2692 }
396
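A short sketch of the interleaved prompt format the processor tests above rely on; the processor is assembled exactly as in `setUp`, and the random image mirrors `prepare_prompts`. This is illustrative only, not the full test setup.

import numpy as np
from PIL import Image

from transformers import IdeficsImageProcessor, IdeficsProcessor, LlamaTokenizerFast

image_processor = IdeficsImageProcessor()
tokenizer = LlamaTokenizerFast.from_pretrained("HuggingFaceM4/tiny-random-idefics")
processor = IdeficsProcessor(image_processor, tokenizer)

# A prompt is a list freely mixing strings and PIL images
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
prompts = [["User:", image, "Describe this image.\nAssistant:"]]

inputs = processor(prompts, return_tensors="pt")
# Yields the four tensors checked in the tests:
# pixel_values, input_ids, attention_mask and image_attention_mask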
# coding=utf-8 # Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors, The Hugging Face Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import LayoutLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LayoutLMForMaskedLM, LayoutLMForQuestionAnswering, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, ) class LayoutLMModelTester: """You can also import this e.g from .test_modeling_layoutlm import LayoutLMModelTester""" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = 
ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return LayoutLMConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def create_and_check_model( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LayoutLMModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, bbox, token_type_ids=token_type_ids) result = model(input_ids, bbox) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LayoutLMForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LayoutLMForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LayoutLMForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LayoutLMForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs 
inputs_dict = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class LayoutLMModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( LayoutLMModel, LayoutLMForMaskedLM, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMForQuestionAnswering, ) if is_torch_available() else None ) pipeline_model_mapping = ( { "document-question-answering": LayoutLMForQuestionAnswering, "feature-extraction": LayoutLMModel, "fill-mask": LayoutLMForMaskedLM, "text-classification": LayoutLMForSequenceClassification, "token-classification": LayoutLMForTokenClassification, "zero-shot": LayoutLMForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True def setUp(self): self.model_tester = LayoutLMModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def prepare_layoutlm_batch_inputs(): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]],device=torch_device) # noqa: E231 attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],],device=torch_device) # noqa: E231 bbox = 
torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]],device=torch_device) # noqa: E231 token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]],device=torch_device) # noqa: E231 # these are sequence labels (i.e. at the token level) labels = torch.tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]],device=torch_device) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_torch class LayoutLMModelIntegrationTest(unittest.TestCase): @slow def test_forward_pass_no_head(self): model = LayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased").to(torch_device) input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs() # forward pass outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) # test the sequence output on [0, :3, :3] expected_slice = torch.tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], device=torch_device, ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3)) # test the pooled output on [1, :3] expected_slice = torch.tensor([-0.6580, -0.0214, 0.8552], device=torch_device) self.assertTrue(torch.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3)) @slow def test_forward_pass_sequence_classification(self): # initialize model with randomly initialized sequence classification head model = LayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2).to( torch_device ) input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs() # forward pass outputs = model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=torch.tensor([1, 1], device=torch_device), ) # test whether we get a loss as a scalar loss = outputs.loss expected_shape = torch.Size([]) self.assertEqual(loss.shape, expected_shape) # test the shape of the logits logits = outputs.logits expected_shape = torch.Size((2, 2)) self.assertEqual(logits.shape, expected_shape) @slow def test_forward_pass_token_classification(self): # initialize model with randomly initialized token classification head model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13).to( torch_device ) input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs() # forward pass outputs = model( input_ids=input_ids, bbox=bbox, 
attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels ) # test the loss calculation to be around 2.65 # expected_loss = torch.tensor(2.65, device=torch_device) # The loss is currently somewhat random and can vary between 0.1-0.3 atol. # self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=0.1)) # test the shape of the logits logits = outputs.logits expected_shape = torch.Size((2, 25, 13)) self.assertEqual(logits.shape, expected_shape) @slow def test_forward_pass_question_answering(self): # initialize model with randomly initialized token classification head model = LayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased").to(torch_device) input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs() # forward pass outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) # test the shape of the logits expected_shape = torch.Size((2, 25)) self.assertEqual(outputs.start_logits.shape, expected_shape) self.assertEqual(outputs.end_logits.shape, expected_shape)
transformers/tests/models/layoutlm/test_modeling_layoutlm.py/0
{ "file_path": "transformers/tests/models/layoutlm/test_modeling_layoutlm.py", "repo_id": "transformers", "token_count": 8034 }
397
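The LayoutLM tester above repairs randomly drawn boxes so that x2 >= x1 and y2 >= y1 before feeding them to the model, and the integration inputs keep coordinates within [0, 1000]. Below is a vectorised sketch of that repair step; the helper name is hypothetical and not part of the test file.

import torch


def make_boxes_legal(bbox: torch.Tensor) -> torch.Tensor:
    # bbox has shape (batch_size, seq_length, 4) in (x1, y1, x2, y2) order,
    # mirroring the element-wise swap loop in LayoutLMModelTester
    x1 = torch.minimum(bbox[..., 0], bbox[..., 2])
    x2 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y1 = torch.minimum(bbox[..., 1], bbox[..., 3])
    y2 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x1, y1, x2, y2], dim=-1)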
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch MaskFormer model. """ import copy import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_fp16, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class MaskFormerModelTester: def __init__( self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32, num_hidden_layers=2, num_attention_heads=2, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_auxiliary_loss = use_auxiliary_loss self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.num_labels = num_labels self.mask_feature_size = mask_feature_size # This is passed to the decoder config. 
We add it to the model tester here for testing self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( torch_device ) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) mask_labels = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5 ).float() class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long() config = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def get_config(self): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1], embed_dim=16, hidden_size=32, num_heads=[1, 1, 2, 2], ), backbone=None, decoder_config=DetrConfig( decoder_ffn_dim=64, decoder_layers=self.num_hidden_layers, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=64, encoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, num_queries=self.num_queries, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def check_output_hidden_state(self, output, config): encoder_hidden_states = output.encoder_hidden_states pixel_decoder_hidden_states = output.pixel_decoder_hidden_states transformer_decoder_hidden_states = output.transformer_decoder_hidden_states self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths)) self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths)) self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers) def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False): with torch.no_grad(): model = MaskFormerModel(config=config) model.to(torch_device) model.eval() output = model(pixel_values=pixel_values, pixel_mask=pixel_mask) output = model(pixel_values, output_hidden_states=True) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(output, config) def create_and_check_maskformer_instance_segmentation_head_model( self, config, pixel_values, pixel_mask, mask_labels, class_labels ): model = MaskFormerForInstanceSegmentation(config=config) model.to(torch_device) model.eval() def comm_check_on_output(result): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape 
# due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) comm_check_on_output(result) result = model( pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels ) comm_check_on_output(result) self.parent.assertTrue(result.loss is not None) self.parent.assertEqual(result.loss.shape, torch.Size([])) @require_torch class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) is_encoder_decoder = False test_pruning = False test_head_masking = False test_missing_keys = False def setUp(self): self.model_tester = MaskFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class in [MaskFormerForInstanceSegmentation]: inputs_dict["mask_labels"] = torch.zeros( ( self.model_tester.batch_size, self.model_tester.num_labels, self.model_tester.min_size, self.model_tester.max_size, ), dtype=torch.float32, device=torch_device, ) inputs_dict["class_labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_labels), dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() def test_maskformer_model(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False) def test_maskformer_instance_segmentation_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs) @unittest.skip(reason="MaskFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="MaskFormer is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="MaskFormer does not use token embeddings") def test_resize_tokens_embeddings(self): pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def test_multi_gpu_data_parallel_forward(self): pass @slow def test_model_from_pretrained(self): for model_name in ["facebook/maskformer-swin-small-coco"]: model = MaskFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_model_with_labels(self): size = (self.model_tester.min_size,) * 2 inputs = { "pixel_values": torch.randn((2, 3, *size), device=torch_device), "mask_labels": torch.randn((2, 10, *size), device=torch_device), "class_labels": torch.zeros(2, 10, device=torch_device).long(), } model = 
MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device) outputs = model(**inputs) self.assertTrue(outputs.loss is not None) def test_hidden_states_output(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # Check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # encoder_hidden_states, pixel_decoder_hidden_states, transformer_decoder_hidden_states, hidden_states added_hidden_states = 4 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) def test_retain_grad_hidden_states_attentions(self): # only MaskFormerForInstanceSegmentation has the loss model_class = self.all_model_classes[1] config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs() config.output_hidden_states = True config.output_attentions = True model = model_class(config) model.to(torch_device) model.train() outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels) encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() attentions = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_forward_auxiliary_loss(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_auxiliary_loss = True config.output_auxiliary_logits = True config.output_hidden_states = True # only test for object detection and segmentation model for model_class in self.all_model_classes[1:]: model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) outputs = 
model(**inputs) self.assertIsNotNone(outputs.auxiliary_logits) self.assertEqual(len(outputs.auxiliary_logits), self.model_tester.num_channels - 1) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @slow class MaskFormerModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco") if is_vision_available() else None ) def test_inference_no_head(self): model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 800, 1088)) with torch.no_grad(): outputs = model(**inputs) expected_slice_hidden_state = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) def test_inference_instance_segmentation_head(self): model = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco") .to(torch_device) .eval() ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 800, 1088)) with torch.no_grad(): outputs = model(**inputs) # masks_queries_logits masks_queries_logits = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), ) expected_slice = [ [-1.3737124, -1.7724937, -1.9364233], [-1.5977281, -1.9867939, -2.1523695], [-1.5795398, -1.9269832, -2.093942], ] expected_slice = torch.tensor(expected_slice).to(torch_device) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE)) # class_queries_logits class_queries_logits = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) expected_slice = torch.tensor( [ [1.6512e00, -5.2572e00, -3.3519e00], [3.6169e-02, -5.9025e00, -2.9313e00], [1.0766e-04, -7.7630e00, -5.1263e00], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE)) def 
test_inference_instance_segmentation_head_resnet_backbone(self): model = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff") .to(torch_device) .eval() ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 800, 1088)) with torch.no_grad(): outputs = model(**inputs) # masks_queries_logits masks_queries_logits = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), ) expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] expected_slice = torch.tensor(expected_slice).to(torch_device) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE)) # class_queries_logits class_queries_logits = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) expected_slice = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE)) @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): model = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff") .to(torch_device, dtype=torch.float16) .eval() ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device, dtype=torch.float16) with torch.no_grad(): _ = model(**inputs) def test_with_segmentation_maps_and_loss(self): model = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco") .to(torch_device) .eval() ) image_processor = self.default_image_processor inputs = image_processor( [np.zeros((3, 400, 333)), np.zeros((3, 400, 333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", ) inputs["pixel_values"] = inputs["pixel_values"].to(torch_device) inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]] inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]] with torch.no_grad(): outputs = model(**inputs) self.assertTrue(outputs.loss is not None)
transformers/tests/models/maskformer/test_modeling_maskformer.py/0
{ "file_path": "transformers/tests/models/maskformer/test_modeling_maskformer.py", "repo_id": "transformers", "token_count": 10411 }
398
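The MaskFormer integration tests above check raw logits from facebook/maskformer-swin-small-coco on a COCO fixture image. A minimal inference sketch outside the test harness, using only the pieces the tests exercise (the image processor, the instance-segmentation model and the two logit tensors), might look as follows; the fixture path and the printed shapes are taken from the assertions above.

import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

checkpoint = "facebook/maskformer-swin-small-coco"
image_processor = MaskFormerImageProcessor.from_pretrained(checkpoint)
model = MaskFormerForInstanceSegmentation.from_pretrained(checkpoint).eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # same fixture image as the tests
inputs = image_processor(image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# per-query class scores; the extra slot in num_labels + 1 is the "no object" class
print(outputs.class_queries_logits.shape)  # (1, num_queries, num_labels + 1)
# mask logits at a quarter of the padded input resolution
print(outputs.masks_queries_logits.shape)  # (1, num_queries, height // 4, width // 4)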
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch MGP-STR model. """ import unittest import requests from transformers import MgpstrConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MgpstrForSceneTextRecognition if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor class MgpstrModelTester: def __init__( self, parent, is_training=False, batch_size=13, image_size=(32, 128), patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=99, num_wordpiece_labels=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, mlp_ratio=4.0, patch_embeds_hidden_size=257, output_hidden_states=None, ): self.parent = parent self.is_training = is_training self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.max_token_length = max_token_length self.num_character_labels = num_character_labels self.num_bpe_labels = num_bpe_labels self.num_wordpiece_labels = num_wordpiece_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.mlp_ratio = mlp_ratio self.patch_embeds_hidden_size = patch_embeds_hidden_size self.output_hidden_states = output_hidden_states def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) config = self.get_config() return config, pixel_values def get_config(self): return MgpstrConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, max_token_length=self.max_token_length, num_character_labels=self.num_character_labels, num_bpe_labels=self.num_bpe_labels, num_wordpiece_labels=self.num_wordpiece_labels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, mlp_ratio=self.mlp_ratio, output_hidden_states=self.output_hidden_states, ) def create_and_check_model(self, config, pixel_values): model = MgpstrForSceneTextRecognition(config) model.to(torch_device) model.eval() with torch.no_grad(): generated_ids = model(pixel_values) self.parent.assertEqual( generated_ids[0][0].shape, (self.batch_size, self.max_token_length, self.num_character_labels) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MgpstrModelTest(ModelTesterMixin, 
PipelineTesterMixin, unittest.TestCase): all_model_classes = (MgpstrForSceneTextRecognition,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": MgpstrForSceneTextRecognition} if is_torch_available() else {} fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_attention_outputs = False def setUp(self): self.model_tester = MgpstrModelTester(self) self.config_tester = ConfigTester(self, config_class=MgpstrConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="MgpstrModel does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) @unittest.skip(reason="MgpstrModel does not support feedforward chunking") def test_feed_forward_chunking(self): pass def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.patch_embeds_hidden_size, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # override as the `logit_scale` parameter initilization is different for MgpstrModel def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if isinstance(param, (nn.Linear, nn.Conv2d, nn.LayerNorm)): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass # We will verify our results on an image from the IIIT-5k dataset def prepare_img(): url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png" im = 
Image.open(requests.get(url, stream=True).raw).convert("RGB") return im @require_vision @require_torch class MgpstrModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "alibaba-damo/mgp-str-base" model = MgpstrForSceneTextRecognition.from_pretrained(model_name).to(torch_device) processor = MgpstrProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor(images=image, return_tensors="pt").pixel_values.to(torch_device) # forward pass with torch.no_grad(): outputs = model(inputs) # verify the logits self.assertEqual(outputs.logits[0].shape, torch.Size((1, 27, 38))) out_strs = processor.batch_decode(outputs.logits) expected_text = "ticket" self.assertEqual(out_strs["generated_text"][0], expected_text) expected_slice = torch.tensor( [[[-39.5397, -44.4024, -36.1844], [-61.4709, -63.8639, -58.3454], [-74.0225, -68.5494, -71.2164]]], device=torch_device, ) self.assertTrue(torch.allclose(outputs.logits[0][:, 1:4, 1:4], expected_slice, atol=1e-4))
transformers/tests/models/mgp_str/test_modeling_mgp_str.py/0
{ "file_path": "transformers/tests/models/mgp_str/test_modeling_mgp_str.py", "repo_id": "transformers", "token_count": 4204 }
399
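The MGP-STR integration test above decodes a cropped word image into the string "ticket". A standalone sketch of that flow, reusing the alibaba-damo/mgp-str-base checkpoint and the IIIT-5k sample URL from the test, is shown below; it only relies on calls that appear in the test itself.

import requests
import torch
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

model_name = "alibaba-damo/mgp-str-base"
processor = MgpstrProcessor.from_pretrained(model_name)
model = MgpstrForSceneTextRecognition.from_pretrained(model_name).eval()

url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"  # cropped word image from the IIIT-5k dataset, as in the test
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

pixel_values = processor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    outputs = model(pixel_values)

# the processor fuses the character, BPE and wordpiece heads into a single decoded string
print(processor.batch_decode(outputs.logits)["generated_text"][0])  # "ticket" for this image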
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch MobileNetV1 model. """ import unittest from transformers import MobileNetV1Config from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetV1ForImageClassification, MobileNetV1Model from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetV1ImageProcessor class MobileNetV1ConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "tf_padding")) self.parent.assertTrue(hasattr(config, "depth_multiplier")) class MobileNetV1ModelTester: def __init__( self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, min_depth=8, tf_padding=True, last_hidden_size=1024, output_stride=32, hidden_act="relu6", classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.depth_multiplier = depth_multiplier self.min_depth = min_depth self.tf_padding = tf_padding self.last_hidden_size = int(last_hidden_size * depth_multiplier) self.output_stride = output_stride self.hidden_act = hidden_act self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileNetV1Config( num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = MobileNetV1Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, 
( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileNetV1ForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1 does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification} if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = MobileNetV1ModelTester(self) self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV1 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileNetV1 does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="MobileNetV1 does not output attentions") def test_attention_outputs(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 26 self.assertEqual(len(hidden_states), expected_num_stages) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = MobileNetV1Model.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class MobileNetV1ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( 
MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1001)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
transformers/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py/0
{ "file_path": "transformers/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py", "repo_id": "transformers", "token_count": 3816 }
400
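The MobileNetV1 integration test above only checks the logits of google/mobilenet_v1_1.0_224 on the COCO fixture image. For reference, a minimal classification sketch built from the same pieces follows; the final id2label lookup is an assumption that the checkpoint ships label names in its config, everything else mirrors the test.

import torch
from PIL import Image
from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor

checkpoint = "google/mobilenet_v1_1.0_224"
image_processor = MobileNetV1ImageProcessor.from_pretrained(checkpoint)
model = MobileNetV1ForImageClassification.from_pretrained(checkpoint).eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # same image the tests use
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1001): 1000 ImageNet classes plus a background class

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])  # assumes id2label is populated in the config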
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase): """Tests OpenAIGPTTokenizer that uses BERT BasicTokenizer.""" tokenizer_class = OpenAIGPTTokenizer rust_tokenizer_class = OpenAIGPTTokenizerFast test_rust_tokenizer = True test_seq2seq = False def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): return "lower newer", "lower newer" def test_full_tokenizer(self): tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def test_padding(self, max_length=15): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Simple input s = "This is a simple input" s2 = ["This is a simple input 1", "This is a simple input 2"] p = ("This is a simple input", "This is a pair") p2 = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", ) # Pair input self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises( ValueError, 
tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", ) # tokenizer has no padding token def test_padding_different_model_input_name(self): pass @require_ftfy @require_spacy @require_tokenizers class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest): """Tests OpenAIGPTTokenizer that uses SpaCy and ftfy.""" pass
transformers/tests/models/openai/test_tokenization_openai.py/0
{ "file_path": "transformers/tests/models/openai/test_tokenization_openai.py", "repo_id": "transformers", "token_count": 2406 }
401
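The tokenizer test above builds a tiny BPE vocabulary on disk and checks the resulting merges and ids. The same toy setup can be reproduced outside the test harness as sketched below; the vocabulary, merges and expected outputs are copied from the test, while the vocab.json / merges.txt file names are an assumption based on the VOCAB_FILES_NAMES convention the test imports.

import json
import os
import tempfile

from transformers import OpenAIGPTTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>",
         "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>"]
merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.json")
merges_file = os.path.join(tmpdir, "merges.txt")
with open(vocab_file, "w") as fp:
    json.dump(dict(zip(vocab, range(len(vocab)))), fp)  # token -> id mapping
with open(merges_file, "w") as fp:
    fp.write("\n".join(merges))  # BPE merge rules, one per line

tokenizer = OpenAIGPTTokenizer(vocab_file, merges_file)
print(tokenizer.tokenize("lower"))  # ["low", "er</w>"] after applying the merges
print(tokenizer.convert_tokens_to_ids(["low", "er</w>", "<unk>"]))  # [14, 15, 20]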
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import tempfile import unittest from transformers import ProphetNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( ProphetNetDecoder, ProphetNetEncoder, ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel, ProphetNetTokenizer, ) from transformers.modeling_outputs import BaseModelOutput class ProphetNetModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, hidden_size=16, encoder_seq_length=7, decoder_seq_length=9, # For common tests is_training=True, use_attention_mask=True, use_labels=True, decoder_start_token_id=0, encoder_ffn_dim=32, num_encoder_layers=2, num_encoder_attention_heads=4, decoder_ffn_dim=32, num_decoder_layers=2, num_decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_decoder_layers self.num_encoder_layers = num_encoder_layers self.num_decoder_layers = num_decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_ffn_dim = encoder_ffn_dim self.num_attention_heads = num_decoder_attention_heads self.num_encoder_attention_heads = num_encoder_attention_heads self.num_decoder_attention_heads = num_decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.ngram = ngram self.num_buckets = num_buckets self.relative_max_distance = relative_max_distance self.disable_ngram_loss = disable_ngram_loss self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 7 self.num_hidden_states_types = 3 # encoder, decoder_main, decoder_ngram self.decoder_attention_idx = 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: 
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_config(self): return ProphetNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_encoder_layers=self.num_encoder_layers, num_decoder_layers=self.num_decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_ffn_dim=self.encoder_ffn_dim, num_encoder_attention_heads=self.num_encoder_attention_heads, num_decoder_attention_heads=self.num_decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ngram=self.ngram, num_buckets=self.num_buckets, relative_max_distance=self.relative_max_distance, disable_ngram_loss=self.disable_ngram_loss, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.encoder_seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) return ( config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, lm_labels, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetModel(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add casaul pad token mask triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = 
result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_decoder_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) # cross-attention + uni-directional self-attention def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 5) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_causal_lm_decoder( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForCausalLM(config=config).to(torch_device).eval() outputs = model( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_generate_with_past_key_value_states( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_decoder_generate_with_past_key_value_states( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForCausalLM(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=10, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=10, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetModel(config=config).to(torch_device).half().eval() output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [ProphetNetModel, ProphetNetForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() # load 
state dict copies weights but does not tie them if model_class == ProphetNetForConditionalGeneration: model.prophetnet.encoder.load_state_dict(model.prophetnet.decoder.state_dict(), strict=False) else: model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def check_fast_integration( self, config, *args, ): input_ids = torch.tensor([[7, 4, 78, 0, 24, 52, 43]], device=torch_device, dtype=torch.long) decoder_input_ids = torch.tensor([[12, 62, 25, 11, 47, 15, 14]], device=torch_device, dtype=torch.long) attention_mask = torch.tensor([[1, 1, 1, 0, 1, 0, 0]], device=torch_device, dtype=torch.long) decoder_attention_mask = torch.tensor([[1, 1, 1, 0, 0, 1, 0]], device=torch_device, dtype=torch.long) lm_labels = torch.tensor([[62, 25, 11, 47, 15, 14, 24]], device=torch_device, dtype=torch.long) torch.manual_seed(0) config.ngram = 4 model = ProphetNetForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertTrue(torch.allclose(result.loss, torch.tensor(4.5892, device=torch_device), atol=1e-3)) expected_logit_slice = torch.tensor( [-0.0184, 0.0758, -0.0543, -0.0093, 0.0050, -0.0660, -0.1453], device=torch_device ) self.parent.assertTrue(torch.allclose(result.logits[0, :, 1], expected_logit_slice, atol=1e-3)) def check_model_with_attn_mask(self, config, input_ids, decoder_input_ids, *args): model = ProphetNetModel(config=config) model.to(torch_device) model.eval() outputs_no_mask = model(input_ids=input_ids[:, :5], decoder_input_ids=decoder_input_ids[:, :5]) attention_mask = torch.ones_like(input_ids) decoder_attention_mask = torch.ones_like(decoder_input_ids) attention_mask[:, 5:] 
= 0 outputs_with_mask = model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) # check encoder self.parent.assertTrue( torch.allclose( outputs_no_mask.encoder_last_hidden_state[0, :, 0], outputs_with_mask.encoder_last_hidden_state[0, :5, 0], atol=1e-3, ) ) # check decoder # main stream self.parent.assertTrue( torch.allclose( outputs_no_mask.last_hidden_state[0, :, 0], outputs_with_mask.last_hidden_state[0, :5, 0], atol=1e-3 ) ) # predict stream self.parent.assertTrue( torch.allclose( outputs_no_mask.last_hidden_state_ngram[0, :5, 0], outputs_with_mask.last_hidden_state_ngram[0, :5, 0], atol=1e-2, ) ) def check_causal_lm_from_pretrained( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, *args ): model = ProphetNetForConditionalGeneration(config).to(torch_device).eval() with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) decoder = ProphetNetForCausalLM.from_pretrained(tmp_dirname).to(torch_device) encoder_hidden_states = model.prophetnet.encoder(input_ids).last_hidden_state model_outputs = model( encoder_outputs=BaseModelOutput(last_hidden_state=encoder_hidden_states), decoder_input_ids=decoder_input_ids, ) dec_outputs = decoder(encoder_hidden_states=encoder_hidden_states, input_ids=decoder_input_ids) self.parent.assertTrue( torch.allclose( model_outputs.logits[0, :5], dec_outputs.logits[0, :5], atol=1e-3, ) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, } return config, inputs_dict class ProphetNetStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, hidden_size=16, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, is_decoder=True, use_attention_mask=True, add_cross_attention=False, use_cache=False, use_labels=True, decoder_start_token_id=0, encoder_ffn_dim=32, num_encoder_layers=2, num_encoder_attention_heads=4, decoder_ffn_dim=32, num_decoder_layers=2, num_decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_decoder_layers self.num_encoder_layers = num_encoder_layers self.num_decoder_layers = num_decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_ffn_dim = encoder_ffn_dim self.num_attention_heads = num_decoder_attention_heads self.num_encoder_attention_heads = num_encoder_attention_heads self.num_decoder_attention_heads = num_decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.ngram = ngram self.num_buckets = num_buckets 
self.relative_max_distance = relative_max_distance self.use_cache = use_cache self.disable_ngram_loss = disable_ngram_loss self.max_position_embeddings = max_position_embeddings self.add_cross_attention = add_cross_attention self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.num_hidden_states_types = 2 # decoder_main, decoder_ngram self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) config = ProphetNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_encoder_layers=self.num_encoder_layers, num_decoder_layers=self.num_decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_ffn_dim=self.encoder_ffn_dim, num_encoder_attention_heads=self.num_encoder_attention_heads, num_decoder_attention_heads=self.num_decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ngram=self.ngram, num_buckets=self.num_buckets, relative_max_distance=self.relative_max_distance, disable_ngram_loss=self.disable_ngram_loss, max_position_embeddings=self.max_position_embeddings, add_cross_attention=self.add_cross_attention, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, attention_mask, lm_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.encoder_seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = ProphetNetDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, 
lm_labels, ): model = ProphetNetDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class ProphetNetStandaloneEncoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, hidden_size=16, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, is_decoder=False, use_attention_mask=True, add_cross_attention=False, use_cache=False, use_labels=True, decoder_start_token_id=0, encoder_ffn_dim=32, num_encoder_layers=2, num_encoder_attention_heads=4, decoder_ffn_dim=32, num_decoder_layers=2, num_decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_decoder_layers self.num_encoder_layers = num_encoder_layers self.num_decoder_layers = num_decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_ffn_dim = encoder_ffn_dim self.num_attention_heads = num_decoder_attention_heads self.num_encoder_attention_heads = num_encoder_attention_heads self.num_decoder_attention_heads = num_decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.num_buckets = num_buckets self.relative_max_distance = relative_max_distance self.use_cache = use_cache 
self.disable_ngram_loss = disable_ngram_loss self.max_position_embeddings = max_position_embeddings self.add_cross_attention = add_cross_attention self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 1 self.num_hidden_states_types = 1 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = ProphetNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_encoder_layers=self.num_encoder_layers, num_decoder_layers=self.num_decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_ffn_dim=self.encoder_ffn_dim, num_encoder_attention_heads=self.num_encoder_attention_heads, num_decoder_attention_heads=self.num_decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, num_buckets=self.num_buckets, relative_max_distance=self.relative_max_distance, disable_ngram_loss=self.disable_ngram_loss, max_position_embeddings=self.max_position_embeddings, add_cross_attention=self.add_cross_attention, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class ProphetNetModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ProphetNetModel, ProphetNetForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (ProphetNetForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": ProphetNetForConditionalGeneration, "feature-extraction": ProphetNetModel, "summarization": ProphetNetForConditionalGeneration, "text-generation": ProphetNetForCausalLM, "text2text-generation": ProphetNetForConditionalGeneration, "translation": ProphetNetForConditionalGeneration, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False is_encoder_decoder = True # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `ProphetNetConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def setUp(self): self.model_tester = ProphetNetModelTester(self) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_lm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_only_decoder_causal_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_decoder(*config_and_inputs) def test_fast_integration(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_fast_integration(*config_and_inputs) def test_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) def test_shift_labels_via_shift_left(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) @unittest.skip("Flaky test with no simple resolution. TODO Fix me @patrickvonplaten") def test_decoder_model_generate(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_value_states(*config_and_inputs) def test_encoder_decoder_model_generate(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_generate_with_past_key_value_states(*config_and_inputs) def test_attn_mask_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_model_with_attn_mask(*config_and_inputs) def test_config_save(self): config = self.model_tester.prepare_config_and_inputs()[0] config.add_cross_attention = False with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config = ProphetNetConfig.from_pretrained(tmp_dirname) self.assertFalse(config.add_cross_attention) def test_causal_lm_from_pretrained(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_causal_lm_from_pretrained(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) # methods overwrite method in `test_modeling_common.py` def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) 
model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) correct_outlen = 7 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, (self.model_tester.ngram + 1) * decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] 
encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) def test_generate_with_head_masking(self): """Generating with head_masking has not been implemented for ProphetNet models yet.""" pass @require_torch class ProphetNetStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (ProphetNetDecoder, ProphetNetForCausalLM) if is_torch_available() else () all_generative_model_classes = (ProphetNetForCausalLM,) if is_torch_available() else () test_pruning = False test_resize_embeddings = False is_encoder_decoder = False def setUp(self): self.model_tester = ProphetNetStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return @unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :) def test_left_padding_compatibility(self): pass @require_torch class ProphetNetStandaloneEncoderModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ProphetNetEncoder,) if is_torch_available() else () test_pruning = False test_resize_embeddings = False is_encoder_decoder = False def setUp(self): self.model_tester = ProphetNetStandaloneEncoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) def test_config(self): self.config_tester.run_common_tests() @require_torch class ProphetNetModelIntegrationTest(unittest.TestCase): @slow def test_pretrained_checkpoint_hidden_states(self): model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased") model.to(torch_device) # encoder-decoder outputs encoder_ids = torch.tensor( [ [ 2871, 102, 2048, 3176, 2780, 1997, 2871, 26727, 2169, 2097, 12673, 1996, 8457, 2006, 2049, 8240, 2859, 2799, 1012, 2023, 6512, 2038, 2174, 13977, 2195, 25962, 1012, 102, ] ] ).to(torch_device) decoder_prev_ids = torch.tensor([[102, 2129, 2116, 2372, 2024, 2006, 2169, 1997, 2122, 2048, 2780, 1029]]).to( torch_device ) output = model( input_ids=encoder_ids, attention_mask=None, encoder_outputs=None, decoder_input_ids=decoder_prev_ids, ) output_predited_logits = output[0] expected_shape = torch.Size((1, 12, 30522)) self.assertEqual(output_predited_logits.shape, expected_shape) expected_slice = torch.tensor( [[[-7.7729, -8.0343, -8.26001], [-7.74213, -7.8629, -8.6000], [-7.7328, -7.8269, -8.5264]]] ).to(torch_device) # self.assertTrue(torch.allclose(output_predited_logits[:, :3, :3], expected_slice, atol=1e-4)) assert torch.allclose(output_predited_logits[:, :3, :3], expected_slice, atol=1e-4) # encoder outputs encoder_outputs = model.prophetnet.encoder(encoder_ids)[0] expected_encoder_outputs_slice = torch.tensor( [[[-0.2526, -0.1951, -0.2185], [-0.8923, 0.2992, 
-0.4623], [-0.4585, 0.0165, -0.6652]]] ).to(torch_device) expected_shape_encoder = torch.Size((1, 28, 1024)) self.assertEqual(encoder_outputs.shape, expected_shape_encoder) # self.assertTrue(torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4)) assert torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4) # decoder outputs decoder_outputs = model.prophetnet.decoder(decoder_prev_ids, encoder_hidden_states=encoder_outputs) predicting_streams = decoder_outputs[1].view(1, model.config.ngram, 12, -1) predicting_streams_logits = model.lm_head(predicting_streams) next_first_stream_logits = predicting_streams_logits[:, 0] # self.assertTrue(torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4)) assert torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4) @slow def test_cnndm_inference(self): model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-cnndm") model.config.max_length = 512 model.to(torch_device) tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-cnndm") ARTICLE_TO_SUMMARIZE = ( "USTC was founded in Beijing by the Chinese Academy of Sciences (CAS) in September 1958. The Director of" " CAS, Mr. Guo Moruo was appointed the first president of USTC. USTC's founding mission was to develop a" " high-level science and technology workforce, as deemed critical for development of China's economy," ' defense, and science and technology education. The establishment was hailed as "A Major Event in the' ' History of Chinese Education and Science." CAS has supported USTC by combining most of its institutes' " with the departments of the university. USTC is listed in the top 16 national key universities, becoming" " the youngest national key university.".lower() ) input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=511, return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) summary_ids = model.generate( input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True ) EXPECTED_SUMMARIZE_512 = ( "us ##tc was founded by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc is listed in the" " top 16 national key universities ." ) generated_titles = [ " ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids ] self.assertListEqual( [EXPECTED_SUMMARIZE_512], generated_titles, ) input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=99, return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) # actually 98 tokens are used. max_length=100 contains bos and eos. summary_ids = model.generate( input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True ) EXPECTED_SUMMARIZE_100 = ( r"us ##tc was founded in beijing by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc " "'" " s founding mission was to develop a high - level science and technology workforce . 
[X_SEP]" ' establishment hailed as " a major event in the history of chinese education and science "' ) generated_titles = [ " ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids ] self.assertListEqual( [EXPECTED_SUMMARIZE_100], generated_titles, ) @slow def test_question_gen_inference(self): model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg") model.to(torch_device) tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg") INPUTS = [ "Bill Gates [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.", "1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.", "April 4, 1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.", ] input_ids = tokenizer(INPUTS, truncation=True, padding=True, return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) gen_output = model.generate(input_ids, num_beams=5, early_stopping=True) generated_questions = tokenizer.batch_decode(gen_output, skip_special_tokens=True) EXPECTED_QUESTIONS = [ "along with paul allen, who founded microsoft?", "what year was microsoft founded?", "when was microsoft founded?", ] self.assertListEqual( EXPECTED_QUESTIONS, generated_questions, )
transformers/tests/models/prophetnet/test_modeling_prophetnet.py/0
{ "file_path": "transformers/tests/models/prophetnet/test_modeling_prophetnet.py", "repo_id": "transformers", "token_count": 25713 }
402
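The ProphetNet tests above lean heavily on one pattern: a forward pass that returns `past_key_values` must produce, for a single appended token, the same activations as a full forward pass over the extended sequence. Below is a minimal standalone sketch of that cache-consistency check (it is not part of the test file; the tiny config values are arbitrary, and the 1e-3 tolerance simply mirrors the one used in `create_and_check_decoder_model_past`), assuming a recent transformers + torch install.

import torch
from transformers import ProphetNetConfig, ProphetNetForCausalLM

# Tiny decoder-only config, sized roughly like the standalone decoder tester above.
config = ProphetNetConfig(
    vocab_size=99,
    hidden_size=16,
    num_encoder_layers=1,
    num_decoder_layers=1,
    num_encoder_attention_heads=2,
    num_decoder_attention_heads=2,
    encoder_ffn_dim=32,
    decoder_ffn_dim=32,
    max_position_embeddings=30,
    is_encoder_decoder=False,
    add_cross_attention=False,
)
model = ProphetNetForCausalLM(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 5))
next_token = torch.randint(0, config.vocab_size, (1, 1))

with torch.no_grad():
    # Forward pass with caching enabled, then feed only the new token plus the cache.
    past_key_values = model(input_ids, use_cache=True).past_key_values
    cached_logits = model(next_token, past_key_values=past_key_values).logits
    # Reference: one full forward pass over the extended sequence.
    full_logits = model(torch.cat([input_ids, next_token], dim=-1)).logits

# The cached single-step logits should match the last position of the full pass
# up to numerical tolerance.
assert torch.allclose(cached_logits[:, -1], full_logits[:, -1], atol=1e-3)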
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch REALM model. """ import copy import unittest import numpy as np from transformers import RealmConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( RealmEmbedder, RealmForOpenQA, RealmKnowledgeAugEncoder, RealmReader, RealmRetriever, RealmScorer, RealmTokenizer, ) class RealmModelTester: def __init__( self, parent, batch_size=13, retriever_proj_size=128, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=50, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=4, reader_seq_len=288 + 32, num_block_records=13353718, searcher_beam_size=8, searcher_seq_len=64, num_labels=3, num_choices=4, num_candidates=10, scope=None, ): # General config self.parent = parent self.batch_size = batch_size self.retriever_proj_size = retriever_proj_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps # Reader config self.span_hidden_size = span_hidden_size self.max_span_width = max_span_width self.reader_layer_norm_eps = reader_layer_norm_eps self.reader_beam_size = reader_beam_size self.reader_seq_len = reader_seq_len # Searcher config self.num_block_records = num_block_records self.searcher_beam_size = searcher_beam_size self.searcher_seq_len = searcher_seq_len self.num_labels = num_labels self.num_choices = num_choices self.num_candidates = num_candidates self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) candiate_input_ids = ids_tensor([self.batch_size, self.num_candidates, self.seq_length], self.vocab_size) reader_input_ids = 
ids_tensor([self.reader_beam_size, self.reader_seq_len], self.vocab_size) input_mask = None candiate_input_mask = None reader_input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) candiate_input_mask = random_attention_mask([self.batch_size, self.num_candidates, self.seq_length]) reader_input_mask = random_attention_mask([self.reader_beam_size, self.reader_seq_len]) token_type_ids = None candidate_token_type_ids = None reader_token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) candidate_token_type_ids = ids_tensor( [self.batch_size, self.num_candidates, self.seq_length], self.type_vocab_size ) reader_token_type_ids = ids_tensor([self.reader_beam_size, self.reader_seq_len], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() # inputs with additional num_candidates axis. scorer_encoder_inputs = (candiate_input_ids, candiate_input_mask, candidate_token_type_ids) # reader inputs reader_inputs = (reader_input_ids, reader_input_mask, reader_token_type_ids) return ( config, input_ids, token_type_ids, input_mask, scorer_encoder_inputs, reader_inputs, sequence_labels, token_labels, choice_labels, ) def get_config(self): return RealmConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, retriever_proj_size=self.retriever_proj_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_candidates=self.num_candidates, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def create_and_check_embedder( self, config, input_ids, token_type_ids, input_mask, scorer_encoder_inputs, reader_inputs, sequence_labels, token_labels, choice_labels, ): model = RealmEmbedder(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.projected_score.shape, (self.batch_size, self.retriever_proj_size)) def create_and_check_encoder( self, config, input_ids, token_type_ids, input_mask, scorer_encoder_inputs, reader_inputs, sequence_labels, token_labels, choice_labels, ): model = RealmKnowledgeAugEncoder(config=config) model.to(torch_device) model.eval() relevance_score = floats_tensor([self.batch_size, self.num_candidates]) result = model( scorer_encoder_inputs[0], attention_mask=scorer_encoder_inputs[1], token_type_ids=scorer_encoder_inputs[2], relevance_score=relevance_score, labels=token_labels, ) self.parent.assertEqual( result.logits.shape, (self.batch_size * self.num_candidates, self.seq_length, self.vocab_size) ) def create_and_check_reader( self, config, input_ids, token_type_ids, input_mask, scorer_encoder_inputs, reader_inputs, sequence_labels, token_labels, choice_labels, ): model = RealmReader(config=config) model.to(torch_device) model.eval() relevance_score = floats_tensor([self.reader_beam_size]) result = model( reader_inputs[0], 
attention_mask=reader_inputs[1], token_type_ids=reader_inputs[2], relevance_score=relevance_score, ) self.parent.assertEqual(result.block_idx.shape, ()) self.parent.assertEqual(result.candidate.shape, ()) self.parent.assertEqual(result.start_pos.shape, ()) self.parent.assertEqual(result.end_pos.shape, ()) def create_and_check_scorer( self, config, input_ids, token_type_ids, input_mask, scorer_encoder_inputs, reader_inputs, sequence_labels, token_labels, choice_labels, ): model = RealmScorer(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, candidate_input_ids=scorer_encoder_inputs[0], candidate_attention_mask=scorer_encoder_inputs[1], candidate_token_type_ids=scorer_encoder_inputs[2], ) self.parent.assertEqual(result.relevance_score.shape, (self.batch_size, self.num_candidates)) self.parent.assertEqual(result.query_score.shape, (self.batch_size, self.retriever_proj_size)) self.parent.assertEqual( result.candidate_score.shape, (self.batch_size, self.num_candidates, self.retriever_proj_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, scorer_encoder_inputs, reader_inputs, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class RealmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( RealmEmbedder, RealmKnowledgeAugEncoder, # RealmScorer is excluded from common tests as it is a container model # consisting of two RealmEmbedders & a simple inner product calculation. 
# RealmScorer ) if is_torch_available() else () ) all_generative_model_classes = () pipeline_model_mapping = {} if is_torch_available() else {} # disable these tests because there is no base_model in Realm test_save_load_fast_init_from_base = False test_save_load_fast_init_to_base = False def setUp(self): self.test_pruning = False self.model_tester = RealmModelTester(self) self.config_tester = ConfigTester(self, config_class=RealmConfig) def test_config(self): self.config_tester.run_common_tests() def test_embedder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_embedder(*config_and_inputs) def test_encoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_embedder(*config_and_inputs) self.model_tester.create_and_check_encoder(*config_and_inputs) def test_scorer(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_scorer(*config_and_inputs) def test_training(self): if not self.model_tester.is_training: return config, *inputs = self.model_tester.prepare_config_and_inputs() input_ids, token_type_ids, input_mask, scorer_encoder_inputs = inputs[0:4] config.return_dict = True tokenizer = RealmTokenizer.from_pretrained("google/realm-orqa-nq-openqa") # RealmKnowledgeAugEncoder training model = RealmKnowledgeAugEncoder(config) model.to(torch_device) model.train() inputs_dict = { "input_ids": scorer_encoder_inputs[0].to(torch_device), "attention_mask": scorer_encoder_inputs[1].to(torch_device), "token_type_ids": scorer_encoder_inputs[2].to(torch_device), "relevance_score": floats_tensor([self.model_tester.batch_size, self.model_tester.num_candidates]), } inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs = inputs_dict loss = model(**inputs).loss loss.backward() # RealmForOpenQA training openqa_config = copy.deepcopy(config) openqa_config.vocab_size = 30522 # the retrieved texts will inevitably have more than 99 vocabs. 
openqa_config.num_block_records = 5 openqa_config.searcher_beam_size = 2 block_records = np.array( [ b"This is the first record.", b"This is the second record.", b"This is the third record.", b"This is the fourth record.", b"This is the fifth record.", ], dtype=object, ) retriever = RealmRetriever(block_records, tokenizer) model = RealmForOpenQA(openqa_config, retriever) model.to(torch_device) model.train() inputs_dict = { "input_ids": input_ids[:1].to(torch_device), "attention_mask": input_mask[:1].to(torch_device), "token_type_ids": token_type_ids[:1].to(torch_device), "answer_ids": input_ids[:1].tolist(), } inputs = self._prepare_for_class(inputs_dict, RealmForOpenQA) loss = model(**inputs).reader_output.loss loss.backward() # Test model.block_embedding_to device = torch.device("cpu") model.block_embedding_to(device) loss = model(**inputs).reader_output.loss loss.backward() self.assertEqual(model.block_emb.device.type, device.type) @slow def test_embedder_from_pretrained(self): model = RealmEmbedder.from_pretrained("google/realm-cc-news-pretrained-embedder") self.assertIsNotNone(model) @slow def test_encoder_from_pretrained(self): model = RealmKnowledgeAugEncoder.from_pretrained("google/realm-cc-news-pretrained-encoder") self.assertIsNotNone(model) @slow def test_open_qa_from_pretrained(self): model = RealmForOpenQA.from_pretrained("google/realm-orqa-nq-openqa") self.assertIsNotNone(model) @slow def test_reader_from_pretrained(self): model = RealmReader.from_pretrained("google/realm-orqa-nq-reader") self.assertIsNotNone(model) @slow def test_scorer_from_pretrained(self): model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer") self.assertIsNotNone(model) @require_torch class RealmModelIntegrationTest(unittest.TestCase): @slow def test_inference_embedder(self): retriever_projected_size = 128 model = RealmEmbedder.from_pretrained("google/realm-cc-news-pretrained-embedder") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = torch.Size((1, retriever_projected_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor([[-0.0714, -0.0837, -0.1314]]) self.assertTrue(torch.allclose(output[:, :3], expected_slice, atol=1e-4)) @slow def test_inference_encoder(self): num_candidates = 2 vocab_size = 30522 model = RealmKnowledgeAugEncoder.from_pretrained( "google/realm-cc-news-pretrained-encoder", num_candidates=num_candidates ) input_ids = torch.tensor([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) relevance_score = torch.tensor([[0.3, 0.7]], dtype=torch.float32) output = model(input_ids, relevance_score=relevance_score)[0] expected_shape = torch.Size((2, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor([[[-11.0888, -11.2544], [-10.2170, -10.3874]]]) self.assertTrue(torch.allclose(output[1, :2, :2], expected_slice, atol=1e-4)) @slow def test_inference_open_qa(self): from transformers.models.realm.retrieval_realm import RealmRetriever tokenizer = RealmTokenizer.from_pretrained("google/realm-orqa-nq-openqa") retriever = RealmRetriever.from_pretrained("google/realm-orqa-nq-openqa") model = RealmForOpenQA.from_pretrained( "google/realm-orqa-nq-openqa", retriever=retriever, ) question = "Who is the pioneer in modern computer science?" 
question = tokenizer( [question], padding=True, truncation=True, max_length=model.config.searcher_seq_len, return_tensors="pt", ).to(model.device) predicted_answer_ids = model(**question).predicted_answer_ids predicted_answer = tokenizer.decode(predicted_answer_ids) self.assertEqual(predicted_answer, "alan mathison turing") @slow def test_inference_reader(self): config = RealmConfig(reader_beam_size=2, max_span_width=3) model = RealmReader.from_pretrained("google/realm-orqa-nq-reader", config=config) concat_input_ids = torch.arange(10).view((2, 5)) concat_token_type_ids = torch.tensor([[0, 0, 1, 1, 1], [0, 0, 1, 1, 1]], dtype=torch.int64) concat_block_mask = torch.tensor([[0, 0, 1, 1, 0], [0, 0, 1, 1, 0]], dtype=torch.int64) relevance_score = torch.tensor([0.3, 0.7], dtype=torch.float32) output = model( concat_input_ids, token_type_ids=concat_token_type_ids, relevance_score=relevance_score, block_mask=concat_block_mask, return_dict=True, ) block_idx_expected_shape = torch.Size(()) start_pos_expected_shape = torch.Size((1,)) end_pos_expected_shape = torch.Size((1,)) self.assertEqual(output.block_idx.shape, block_idx_expected_shape) self.assertEqual(output.start_pos.shape, start_pos_expected_shape) self.assertEqual(output.end_pos.shape, end_pos_expected_shape) expected_block_idx = torch.tensor(1) expected_start_pos = torch.tensor(3) expected_end_pos = torch.tensor(3) self.assertTrue(torch.allclose(output.block_idx, expected_block_idx, atol=1e-4)) self.assertTrue(torch.allclose(output.start_pos, expected_start_pos, atol=1e-4)) self.assertTrue(torch.allclose(output.end_pos, expected_end_pos, atol=1e-4)) @slow def test_inference_scorer(self): num_candidates = 2 model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer", num_candidates=num_candidates) input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) candidate_input_ids = torch.tensor([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) output = model(input_ids, candidate_input_ids=candidate_input_ids)[0] expected_shape = torch.Size((1, 2)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor([[0.7410, 0.7170]]) self.assertTrue(torch.allclose(output, expected_slice, atol=1e-4))
transformers/tests/models/realm/test_modeling_realm.py/0
{ "file_path": "transformers/tests/models/realm/test_modeling_realm.py", "repo_id": "transformers", "token_count": 9513 }
403
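The REALM scorer checks above (`create_and_check_scorer` and `test_inference_scorer`) boil down to a simple shape contract: candidate passages carry an extra `num_candidates` axis, and the scorer returns one relevance score per (query, candidate) pair alongside the projected embeddings used for the inner product. A small self-contained sketch of that contract follows; the toy sizes are arbitrary illustration, not part of the test file.

import torch
from transformers import RealmConfig, RealmScorer

# Toy config; the sizes only need to be mutually consistent.
config = RealmConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    retriever_proj_size=16,
    num_candidates=3,
)
model = RealmScorer(config).eval()

batch_size, seq_len = 2, 7
input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_len))
# Candidates have an additional num_candidates axis.
candidate_input_ids = torch.randint(
    0, config.vocab_size, (batch_size, config.num_candidates, seq_len)
)

with torch.no_grad():
    out = model(input_ids, candidate_input_ids=candidate_input_ids)

# One relevance score per (query, candidate) pair ...
assert out.relevance_score.shape == (batch_size, config.num_candidates)
# ... computed from the projected query and candidate embeddings.
assert out.query_score.shape == (batch_size, config.retriever_proj_size)
assert out.candidate_score.shape == (batch_size, config.num_candidates, config.retriever_proj_size)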
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ResNet model. """ import unittest from transformers import ResNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ResNetBackbone, ResNetForImageClassification, ResNetModel from transformers.models.resnet.modeling_resnet import RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class ResNetModelTester: def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) self.out_features = out_features self.out_indices = out_indices def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return ResNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels): model = ResNetModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = ResNetForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_backbone(self, config, 
pixel_values, labels): model = ResNetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:]) # verify backbone works with out_features=None config.out_features = None model = ResNetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ResNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = ( ( ResNetModel, ResNetForImageClassification, ResNetBackbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": ResNetModel, "image-classification": ResNetForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = ResNetModelTester(self) self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="ResNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="ResNet does not support input and output embeddings") def test_model_common_attributes(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, module in model.named_modules(): if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) self.assertTrue( torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly 
initialized", ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() layers_type = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: config.layer_type = layer_type inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="ResNet does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ResNetModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ResNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( AutoImageProcessor.from_pretrained(RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = ResNetForImageClassification.from_pretrained(RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-11.1069, -9.7877, -8.3777]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @require_torch class ResNetBackboneTest(BackboneTesterMixin, unittest.TestCase): all_model_classes = (ResNetBackbone,) if is_torch_available() else () has_attentions = False config_class = ResNetConfig def setUp(self): self.model_tester = ResNetModelTester(self)
transformers/tests/models/resnet/test_modeling_resnet.py/0
{ "file_path": "transformers/tests/models/resnet/test_modeling_resnet.py", "repo_id": "transformers", "token_count": 5143 }
404
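The backbone assertions in the ResNet tests above (`create_and_check_backbone`) encode how `out_features` selects which stages are returned and how the channel counts and spatial resolution fall out of the config. The sketch below reuses the same toy sizes as `ResNetModelTester`; it is illustration only and not part of the test file.

import torch
from transformers import ResNetBackbone, ResNetConfig

# Same toy sizes as ResNetModelTester above.
config = ResNetConfig(
    num_channels=3,
    embeddings_size=10,
    hidden_sizes=[10, 20, 30, 40],
    depths=[1, 1, 2, 1],
    out_features=["stage2", "stage3", "stage4"],
)
model = ResNetBackbone(config).eval()

pixel_values = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    outputs = model(pixel_values)

# One feature map per requested stage, with channel counts taken from hidden_sizes[1:].
assert len(outputs.feature_maps) == len(config.out_features)
assert list(model.channels) == config.hidden_sizes[1:]
# For a 32x32 input, stage2 is downsampled to 4x4, as asserted in create_and_check_backbone.
assert tuple(outputs.feature_maps[0].shape) == (1, 20, 4, 4)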
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch RoFormer model. """ import unittest from transformers import RoFormerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerModel, ) from transformers.models.roformer.modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerSelfAttention, RoFormerSinusoidalPositionalEmbedding, ) class RoFormerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, 
choice_labels def get_config(self): return RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RoFormerModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = RoFormerModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = RoFormerForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_generate_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = RoFormerForCausalLM(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=15, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=15, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = 
RoFormerForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = RoFormerForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RoFormerForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = RoFormerForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = RoFormerForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = RoFormerForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class RoFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( RoFormerModel, RoFormerForMaskedLM, RoFormerForCausalLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (RoFormerForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": RoFormerModel, "fill-mask": RoFormerForMaskedLM, "question-answering": RoFormerForQuestionAnswering, "text-classification": RoFormerForSequenceClassification, "text-generation": RoFormerForCausalLM, "token-classification": RoFormerForTokenClassification, "zero-shot": RoFormerForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = RoFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_generate_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_generate_causal_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def 
test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) @slow def test_model_from_pretrained(self): for model_name in ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = RoFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch class RoFormerModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = RoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 50000 expected_shape = torch.Size((1, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) # TODO Replace values below with what was printed above. 
expected_slice = torch.tensor( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @require_torch class RoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_basic(self): input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device) emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6).to(torch_device) emb = emb1(input_ids.shape) desired_weights = torch.tensor( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ).to(torch_device) self.assertTrue( torch.allclose(emb, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{emb[0]}\n", ) def test_positional_emb_weights_against_roformer(self): desired_weights = torch.tensor( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ).to(torch_device) emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512).to(torch_device) weights = emb1.weight.data[:3, :5].to(torch_device) self.assertTrue( torch.allclose(weights, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{weights}\n", ) @require_torch class RoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_apply_rotary_position_embeddings(self): # 2,12,16,64 query_layer = ( torch.arange(2 * 12 * 16 * 64, dtype=torch.float, device=torch_device).reshape(2, 12, 16, 64) / 100 ).to(torch_device) key_layer = ( -torch.arange(2 * 12 * 16 * 64, dtype=torch.float, device=torch_device).reshape(2, 12, 16, 64) / 100 ).to(torch_device) embed_positions = RoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64).to(torch_device) sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :] query_layer, key_layer = RoFormerSelfAttention.apply_rotary_position_embeddings( sinusoidal_pos, query_layer, key_layer ) desired_query_layer = torch.tensor( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ).to(torch_device) desired_key_layer = torch.tensor( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ).to(torch_device) self.assertTrue( torch.allclose(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance), msg=f"\nexp:\n{desired_query_layer}\ngot:\n{query_layer}\n", ) self.assertTrue( torch.allclose(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance), msg=f"\nexp:\n{desired_key_layer}\ngot:\n{key_layer}\n", )
transformers/tests/models/roformer/test_modeling_roformer.py/0
{ "file_path": "transformers/tests/models/roformer/test_modeling_roformer.py", "repo_id": "transformers", "token_count": 11379 }
405
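For readers skimming the RoFormer test file above: RoFormerSinusoidalPositionalEmbeddingTest and RoFormerSelfAttentionRotaryPositionEmbeddingTest assert hard-coded values for the sinusoidal table and for apply_rotary_position_embeddings without showing the operation itself. Below is a minimal, self-contained sketch of that style of rotary embedding (a sin|cos sinusoidal table applied to interleaved feature pairs). The helper names sinusoidal_positions and apply_rotary and the toy shapes are illustrative assumptions, not the transformers API; treat this as a sketch of the technique, not the library's implementation.

# Illustrative rotary-position-embedding sketch (hypothetical helpers, not transformers code).
import torch


def sinusoidal_positions(seq_len: int, dim: int) -> torch.Tensor:
    # Sinusoidal table with sin in the first dim/2 columns and cos in the last dim/2.
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
    angles = torch.arange(seq_len).float()[:, None] * inv_freq[None, :]
    return torch.cat([torch.sin(angles), torch.cos(angles)], dim=-1)  # [seq_len, dim]


def apply_rotary(x: torch.Tensor, sinusoidal_pos: torch.Tensor) -> torch.Tensor:
    # x: [batch, heads, seq, dim]; sinusoidal_pos: [seq, dim] split into sin|cos halves.
    sin, cos = sinusoidal_pos.chunk(2, dim=-1)
    sin = torch.repeat_interleave(sin, 2, dim=-1)  # align sin/cos with the (even, odd) feature pairs
    cos = torch.repeat_interleave(cos, 2, dim=-1)
    # Rotate every feature pair: (x_even, x_odd) -> (x_even*cos - x_odd*sin, x_odd*cos + x_even*sin).
    x_rotated_half = torch.stack([-x[..., 1::2], x[..., 0::2]], dim=-1).reshape_as(x)
    return x * cos + x_rotated_half * sin


if __name__ == "__main__":
    query = torch.arange(2 * 12 * 16 * 64, dtype=torch.float).reshape(2, 12, 16, 64) / 100
    table = sinusoidal_positions(seq_len=16, dim=64)
    print(apply_rotary(query, table).shape)  # torch.Size([2, 12, 16, 64])

As a sanity check against the tests above, sinusoidal_positions(2, 6) should reproduce, up to rounding, the desired_weights asserted in test_basic (sin values in the first three columns, cos values in the last three).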
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import random import tempfile import unittest import numpy as np from transformers import Speech2TextFeatureExtractor from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin global_rng = random.Random() # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_torchaudio class Speech2TextFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.num_mel_bins = num_mel_bins self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = Speech2TextFeatureExtractor def setUp(self): self.feat_extract_tester = Speech2TextFeatureExtractionTester(self) def _check_zero_mean_unit_variance(self, input_vector): self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3)) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 
800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_cepstral_mean_and_variance_normalization(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean_and_variance_normalization_np(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean_and_variance_normalization_trunc_max_length(self): feature_extractor = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] inputs = feature_extractor( speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1]) self._check_zero_mean_unit_variance(input_features[2]) def test_cepstral_mean_and_variance_normalization_trunc_longest(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] inputs = feature_extractor( speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 4, 24)) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] inputs = feature_extractor( speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 6, 24)) def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_integration(self): # fmt: off expected = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625, ]) # fmt: on input_speech = self._load_datasamples(1) feature_extractor = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) input_features = feature_extractor(input_speech, return_tensors="pt").input_features self.assertEqual(input_features.shape, (1, 584, 24)) self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4)) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() self.assertDictEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() self.assertEqual(dict_first, dict_second) # exact same tests as before, except that we simulate that torchaudio is not available @require_torch @unittest.mock.patch( "transformers.models.speech_to_text.feature_extraction_speech_to_text.is_speech_available", lambda: False ) class Speech2TextFeatureExtractionWithoutTorchaudioTest(Speech2TextFeatureExtractionTest): def test_using_audio_utils(self): # Tests that it uses audio_utils instead of torchaudio feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) self.assertTrue(hasattr(feat_extract, "window")) self.assertTrue(hasattr(feat_extract, "mel_filters")) from transformers.models.speech_to_text.feature_extraction_speech_to_text import is_speech_available self.assertFalse(is_speech_available())
transformers/tests/models/speech_to_text/test_feature_extraction_speech_to_text.py/0
{ "file_path": "transformers/tests/models/speech_to_text/test_feature_extraction_speech_to_text.py", "repo_id": "transformers", "token_count": 6315 }
406
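The Speech2Text padding/normalization tests above all funnel through _check_zero_mean_unit_variance on the unpadded slice of each utterance, i.e. they verify utterance-level cepstral mean and variance normalization of the log-mel features. A minimal sketch of that kind of normalization on one padded feature matrix is shown below; cmvn and its arguments are hypothetical names for illustration, not the feature extractor's internal API, and the statistics are assumed to be computed per feature dimension over the valid frames only.

# Illustrative utterance-level CMVN sketch (hypothetical helper, not transformers code).
import numpy as np


def cmvn(features: np.ndarray, n_valid_frames: int, padding_value: float = 0.0) -> np.ndarray:
    # features: [num_frames, num_mel_bins]; rows >= n_valid_frames are padding.
    valid = features[:n_valid_frames]
    mean = valid.mean(axis=0)
    std = valid.std(axis=0)
    normalized = features.copy()
    normalized[:n_valid_frames] = (valid - mean) / np.maximum(std, 1e-10)
    normalized[n_valid_frames:] = padding_value  # padded frames stay at the padding value
    return normalized


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    feats = np.zeros((10, 24), dtype=np.float32)
    feats[:7] = rng.normal(loc=3.0, scale=2.0, size=(7, 24))
    out = cmvn(feats, n_valid_frames=7)
    # Per-feature mean ~ 0 and variance ~ 1 over the 7 valid frames, as the tests assert.
    print(np.abs(out[:7].mean(axis=0)).max(), np.abs(out[:7].var(axis=0) - 1).max())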
# coding=utf-8 # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class SqueezeBertModelTester(object): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.q_groups = q_groups self.k_groups = k_groups self.v_groups = v_groups self.post_attention_groups = post_attention_groups self.intermediate_groups = intermediate_groups self.output_groups = output_groups def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, 
sequence_labels, token_labels, choice_labels def get_config(self): return SqueezeBertConfig( embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, ) def create_and_check_squeezebert_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = SqueezeBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_squeezebert_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = SqueezeBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_squeezebert_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = SqueezeBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_squeezebert_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = SqueezeBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_squeezebert_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = SqueezeBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_squeezebert_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = SqueezeBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 
self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) pipeline_model_mapping = ( { "feature-extraction": SqueezeBertModel, "fill-mask": SqueezeBertForMaskedLM, "question-answering": SqueezeBertForQuestionAnswering, "text-classification": SqueezeBertForSequenceClassification, "token-classification": SqueezeBertForTokenClassification, "zero-shot": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = True test_head_masking = False def setUp(self): self.model_tester = SqueezeBertModelTester(self) self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37) def test_config(self): self.config_tester.run_common_tests() def test_squeezebert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SqueezeBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_sentencepiece @require_tokenizers @require_torch class SqueezeBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_classification_head(self): model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli") input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]]) output = model(input_ids)[0] expected_shape = torch.Size((1, 3)) self.assertEqual(output.shape, expected_shape) expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]]) self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
transformers/tests/models/squeezebert/test_modeling_squeezebert.py/0
{ "file_path": "transformers/tests/models/squeezebert/test_modeling_squeezebert.py", "repo_id": "transformers", "token_count": 5314 }
407
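Both the RoFormer and SqueezeBert multiple-choice checks above build their inputs with unsqueeze(1).expand(-1, num_choices, -1).contiguous(). The short demonstration below only walks through the resulting shapes; the sizes mirror the testers' defaults but are otherwise arbitrary, and real multiple-choice inputs would encode one (context, choice) pair per slot rather than repeating the same tokens.

# Shape walk-through of the unsqueeze/expand pattern used for multiple-choice inputs.
import torch

batch_size, seq_length, num_choices, vocab_size = 13, 7, 4, 99
input_ids = torch.randint(0, vocab_size, (batch_size, seq_length))   # [13, 7]

expanded = input_ids.unsqueeze(1)                  # [13, 1, 7]
expanded = expanded.expand(-1, num_choices, -1)    # [13, 4, 7] -- a view, no data copied
expanded = expanded.contiguous()                   # materialize so downstream .view() calls work

assert expanded.shape == (batch_size, num_choices, seq_length)
assert torch.equal(expanded[:, 0], expanded[:, 1])  # every choice slot holds the same tokens here
print(expanded.shape)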
# coding=utf-8 # Copyright 2018 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import pickle import tempfile import unittest from transformers import T5Config, is_torch_available from transformers.models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES from transformers.testing_utils import ( require_accelerate, require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from transformers.utils import cached_property, is_torch_fx_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_fx_available(): from transformers.utils.fx import symbolic_trace if is_torch_available(): import torch from transformers import ( AutoTokenizer, ByT5Tokenizer, T5EncoderModel, T5ForConditionalGeneration, T5ForQuestionAnswering, T5ForSequenceClassification, T5ForTokenClassification, T5Model, T5Tokenizer, ) from transformers.models.t5.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST class T5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def get_large_model_config(self): return T5Config.from_pretrained("t5-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size).clamp(2) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) 
lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_pipeline_config(self): return T5Config( vocab_size=166, # t5 forces 100 extra tokens d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def get_config(self): return T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add casaul pad token mask triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past 
self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_with_sequence_classification_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device) model = T5ForSequenceClassification(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=input_ids, labels=labels, ) # self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).get_decoder() model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) 
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_generate_with_past_key_values( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5ForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).to(torch_device).half().eval() output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [T5Model, T5ForConditionalGeneration]: torch.manual_seed(0) model = 
model_class(config=config).to(torch_device).eval() # load state dict copies weights but does not tie them model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def check_resize_embeddings_t5_v1_1( self, config, ): prev_vocab_size = config.vocab_size config.tie_word_embeddings = False model = T5ForConditionalGeneration(config=config).to(torch_device).eval() model.resize_token_embeddings(prev_vocab_size - 10) self.parent.assertEqual(model.get_input_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.get_output_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.config.vocab_size, prev_vocab_size - 10) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, } return config, inputs_dict @require_torch class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (T5Model, T5ForConditionalGeneration, T5ForSequenceClassification, T5ForQuestionAnswering) if is_torch_available() else () ) all_generative_model_classes = (T5ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": T5ForConditionalGeneration, "feature-extraction": T5Model, "question-answering": T5ForQuestionAnswering, "summarization": T5ForConditionalGeneration, "text-classification": T5ForSequenceClassification, "text2text-generation": T5ForConditionalGeneration, "translation": T5ForConditionalGeneration, "zero-shot": 
T5ForSequenceClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (T5Model, T5ForConditionalGeneration) if is_torch_available() else () fx_compatible = True test_pruning = False test_resize_embeddings = True test_model_parallel = True is_encoder_decoder = True # The small T5 model needs higher percentages for CPU/MP tests model_split_percents = [0.8, 0.9] def setUp(self): self.model_tester = T5ModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) # `QAPipelineTests` is not working well with slow tokenizers (for some models) and we don't want to touch the file # `src/transformers/data/processors/squad.py` (where this test fails for this model) def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name ): if tokenizer_name is None: return True if pipeline_test_case_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not is_torch_fx_available() or not self.fx_compatible: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: if model_class.__name__ == "T5ForSequenceClassification": continue model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = 
len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Test that the model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. # (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() def test_config(self): self.config_tester.run_common_tests() def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) # T5ForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (T5Model, T5ForConditionalGeneration, T5ForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_config_and_model_silu_gated(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.feed_forward_proj = "gated-silu" self.model_tester.create_and_check_model(*config_and_inputs) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_with_sequence_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs) def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_past_with_attn_mask(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_decoder_model_past_with_3d_attn_mask(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.model_tester.prepare_config_and_inputs() attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length], vocab_size=2, ) decoder_attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length], vocab_size=2, ) self.model_tester.create_and_check_decoder_model_attention_mask_past( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_generate_with_past_key_values(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs) def test_encoder_decoder_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_v1_1_resize_embeddings(self): config = self.model_tester.prepare_config_and_inputs()[0] self.model_tester.check_resize_embeddings_t5_v1_1(config) @slow def test_model_from_pretrained(self): for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = T5Model.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip("Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() model = T5Model(config_and_inputs[0]).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], ) def test_generate_with_head_masking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] max_length = config_and_inputs[1].shape[-1] + 3 model = T5ForConditionalGeneration(config).eval() model.to(torch_device) head_masking = { "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device), "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), } for attn_name, (name, mask) in zip(attention_names, head_masking.items()): head_masks = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": head_masks["decoder_head_mask"] = torch.ones( config.num_decoder_layers, config.num_heads, device=torch_device ) out = model.generate( config_and_inputs[1], num_beams=1, max_length=max_length, output_attentions=True, 
return_dict_in_generate=True, **head_masks, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload(self): pass @unittest.skip("Does not support conversations.") def test_pipeline_conversational(self): pass class T5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, # For common tests use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length # For common tests self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training def get_large_model_config(self): return T5Config.from_pretrained("t5-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = T5EncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) def create_and_check_model_fp16_forward( self, config, input_ids, attention_mask, ): model = T5EncoderModel(config=config).to(torch_device).half().eval() output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_with_token_classification_head( self, config, input_ids, attention_mask, ): labels = torch.tensor([1] * self.seq_length * self.batch_size, dtype=torch.long, device=torch_device) model = T5ForTokenClassification(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, labels=labels, attention_mask=attention_mask, ) self.parent.assertEqual(outputs["logits"].size(), 
(self.batch_size, self.seq_length, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class T5EncoderOnlyModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (T5EncoderModel, T5ForTokenClassification) if is_torch_available() else () test_pruning = False test_resize_embeddings = False test_model_parallel = True pipeline_model_mapping = ( { "token-classification": T5ForTokenClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (T5EncoderModel,) if is_torch_available() else () def setUp(self): self.model_tester = T5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_with_token_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_token_classification_head(*config_and_inputs) def use_task_specific_params(model, task): model.config.update(model.config.task_specific_params[task]) @require_torch @require_accelerate @require_tokenizers @slow class T5ModelFp16Tests(unittest.TestCase): def test_fp16_fp32_conversion(self): r""" A test to check whether the argument `keep_in_fp32_modules` correctly does its job """ orig_import = __import__ accelerate_mock = unittest.mock.Mock() # mock import of accelerate def import_accelerate_mock(name, *args, **kwargs): if name == "accelerate": if accelerate_available: return accelerate_mock else: raise ImportError return orig_import(name, *args, **kwargs) # Load without using `accelerate` with unittest.mock.patch("builtins.__import__", side_effect=import_accelerate_mock): accelerate_available = False model = T5ForConditionalGeneration.from_pretrained("t5-small", torch_dtype=torch.float16) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16) # Load without in bf16 model = T5ForConditionalGeneration.from_pretrained("t5-small", torch_dtype=torch.bfloat16) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16) # Load using `accelerate` in bf16 model = T5ForConditionalGeneration.from_pretrained("t5-small", torch_dtype=torch.bfloat16, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16) # Load using `accelerate` in bf16 model = T5ForConditionalGeneration.from_pretrained( "t5-small", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True ) 
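        # Note: the `keep_in_fp32_modules` upcast is only expected for float16 loads, so in
        # bfloat16 both `wo` and `wi` should keep the requested dtype.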
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16) # Load without using `accelerate` model = T5ForConditionalGeneration.from_pretrained( "t5-small", torch_dtype=torch.float16, low_cpu_mem_usage=True ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16) # Load using `accelerate` model = T5ForConditionalGeneration.from_pretrained("t5-small", torch_dtype=torch.float16, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16) @require_torch @require_sentencepiece @require_tokenizers class T5ModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return T5ForConditionalGeneration.from_pretrained("t5-base").to(torch_device) @cached_property def tokenizer(self): return T5Tokenizer.from_pretrained("t5-base") @slow def test_torch_quant(self): r""" Test that a simple `torch.quantization.quantize_dynamic` call works on a T5 model. """ model_name = "google/flan-t5-small" tokenizer = T5Tokenizer.from_pretrained(model_name) model = T5ForConditionalGeneration.from_pretrained(model_name) model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8) input_text = "Answer the following yes/no question by reasoning step-by-step. Can you write a whole Haiku in a single tweet?" input_ids = tokenizer(input_text, return_tensors="pt").input_ids _ = model.generate(input_ids) @slow def test_small_generation(self): model = T5ForConditionalGeneration.from_pretrained("t5-small").to(torch_device) model.config.max_length = 8 model.config.num_beams = 1 model.config.do_sample = False tokenizer = T5Tokenizer.from_pretrained("t5-small") input_ids = tokenizer("summarize: Hello there", return_tensors="pt").input_ids.to(torch_device) sequences = model.generate(input_ids) output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertTrue(output_str == "Hello there!") @slow def test_small_integration_test(self): """ For comparision run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = T5ForConditionalGeneration.from_pretrained("t5-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("t5-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -19.0845 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_v1_1_integration_test(self): """ For comparision run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>' >>> 
path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = T5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -59.0293 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_byt5_integration_test(self): """ For comparision run: >>> import t5 # pip install t5==0.9.1 >>> path_to_byt5_small_checkpoint = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_tf_checkpoint, batch_size=1, tpu=None) >>> vocab = t5.data.ByteVocabulary() >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = T5ForConditionalGeneration.from_pretrained("google/byt5-small").to(torch_device) tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -60.7397 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_summarization(self): model = self.model tok = self.tokenizer FRANCE_ARTICLE = ( # @noqa "Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." 
Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. 
A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) SHORTER_ARTICLE = ( "(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. 
"Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) IRAN_ARTICLE = ( "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. 
That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. 
Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) ARTICLE_SUBWAY = ( "New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) expected_summaries = [ 'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a' " cell phone video of the final seconds . \"one can hear cries of 'My God' in several languages,\" one" " magazine says .", "the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a" " preliminary examination into the situation in the occupied Palestinian territory . as members of the" " court, Palestinians may be subject to counter-charges as well .", "the u.s. 
and its negotiating partners reached a very strong framework agreement with Iran . aaron miller:" " the debate that has already begun since the announcement of the new framework will likely result in more" " heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and" " implement a rigorous inspection regime .", "prosecutors say the marriages were part of an immigration scam . if convicted, barrientos faces two" ' criminal counts of "offering a false instrument for filing in the first degree" she has been married 10' " times, with nine of her marriages occurring between 1999 and 2002 .", ] use_task_specific_params(model, "summarization") dct = tok( [model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]], padding="max_length", truncation=True, return_tensors="pt", ).to(torch_device) self.assertEqual(512, dct["input_ids"].shape[1]) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertListEqual( expected_summaries, decoded, ) @slow def test_translation_en_to_de(self): model = self.model tok = self.tokenizer use_task_specific_params(model, "translation_en_to_de") en_text = '"Luigi often said to me that he never wanted the brothers to end up in court", she wrote.' expected_translation = ( '"Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen", schrieb sie.' ) input_ids = tok.encode(model.config.prefix + en_text, return_tensors="pt") input_ids = input_ids.to(torch_device) output = model.generate(input_ids) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, expected_translation) @slow def test_translation_en_to_fr(self): model = self.model # t5-base tok = self.tokenizer use_task_specific_params(model, "translation_en_to_fr") en_text = ( ' This image section from an infrared recording by the Spitzer telescope shows a "family portrait" of' " countless generations of stars: the oldest stars are seen as blue dots. " ) input_ids = tok.encode(model.config.prefix + en_text, return_tensors="pt") input_ids = input_ids.to(torch_device) output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=100, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) new_truncated_translation = ( "Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre " "un " "« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées " "sous forme " "de points bleus." ) self.assertEqual(translation, new_truncated_translation) @slow def test_translation_en_to_ro(self): model = self.model tok = self.tokenizer use_task_specific_params(model, "translation_en_to_ro") en_text = "Taco Bell said it plans to add 2,000 locations in the US by 2022." expected_translation = "Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022." 
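        # `use_task_specific_params` copied the task settings onto `model.config`, so
        # `model.config.prefix` here is the task prompt (e.g. "translate English to Romanian: ")
        # that gets prepended to the raw English text before tokenization.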
inputs = tok(model.config.prefix + en_text, return_tensors="pt").to(torch_device) output = model.generate(**inputs) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, expected_translation) @slow def test_contrastive_search_t5(self): article = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) article = "summarize: " + article.strip() t5_tokenizer = AutoTokenizer.from_pretrained("flax-community/t5-base-cnn-dm") t5_model = T5ForConditionalGeneration.from_pretrained("flax-community/t5-base-cnn-dm").to(torch_device) input_ids = t5_tokenizer( article, add_special_tokens=False, truncation=True, max_length=512, return_tensors="pt" ).input_ids.to(torch_device) outputs = t5_model.generate(input_ids, penalty_alpha=0.5, top_k=5, max_length=64) generated_text = t5_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "Liana Barrientos has been married 10 times, nine of them in the Bronx. Her husbands filed for " "permanent residence after the marriages, prosecutors say." 
], ) @require_torch class TestAsymmetricT5(unittest.TestCase): def build_model_and_check_forward_pass(self, **kwargs): tester = T5ModelTester(self, **kwargs) config, *inputs = tester.prepare_config_and_inputs() ( input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = inputs model = T5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) # outputs = model(*inputs) assert len(outputs) == 4 assert outputs["logits"].size() == (tester.batch_size, tester.decoder_seq_length, tester.vocab_size) assert outputs["loss"].size() == () return model def test_small_decoder(self): # num_hidden_layers is passed to T5Config as num_layers model = self.build_model_and_check_forward_pass(decoder_layers=1, num_hidden_layers=2) assert len(model.encoder.block) == 2 assert len(model.decoder.block) == 1 def test_defaulting_to_symmetry(self): # num_hidden_layers is passed to T5Config as num_layers model = self.build_model_and_check_forward_pass(num_hidden_layers=2) assert len(model.decoder.block) == len(model.encoder.block) == 2
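

# A minimal sketch of the same asymmetric setup built directly with `T5Config` (illustrative
# only, not exercised by the tests above): `num_layers` sets the encoder depth while
# `num_decoder_layers` sets the decoder depth, mirroring what `TestAsymmetricT5` checks.
def _example_build_asymmetric_t5():
    config = T5Config(
        vocab_size=99,
        d_model=32,
        d_kv=8,
        d_ff=37,
        num_layers=2,  # encoder blocks
        num_decoder_layers=1,  # decoder blocks
        num_heads=4,
    )
    model = T5ForConditionalGeneration(config)
    assert len(model.encoder.block) == 2
    assert len(model.decoder.block) == 1
    return model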
transformers/tests/models/t5/test_modeling_t5.py/0
{ "file_path": "transformers/tests/models/t5/test_modeling_t5.py", "repo_id": "transformers", "token_count": 34412 }
408
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch TrOCR model. """ import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class TrOCRStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = TrOCRConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, ) return (config, input_ids, attention_mask, lm_labels) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = TrOCRDecoder(config=config).to(torch_device).eval() 
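        # restrict to a small batch and bump any ids equal to the pad token (0) so the
        # cache comparison below is not influenced by padding positions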
input_ids = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1 # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, lm_labels = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else () pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} fx_compatible = True test_pruning = False def setUp(self): self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=TrOCRConfig) # not implemented currently def test_inputs_embeds(self): pass # trocr has no base model def test_save_load_fast_init_from_base(self): pass # trocr has no base model def test_save_load_fast_init_to_base(self): pass def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) # decoder cannot keep gradients def test_retain_grad_hidden_states_attentions(self): return @unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :) def test_left_padding_compatibility(self): pass
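

# A minimal sketch of the cached-decoding pattern verified above (illustrative only, not part
# of the test suite): run the prefix once with `use_cache=True`, then feed only the newest
# token together with the returned `past_key_values`.
def _example_incremental_decoding(decoder, input_ids):
    outputs = decoder(input_ids, use_cache=True)
    past_key_values = outputs.past_key_values
    # hypothetical next token id; any id below the vocab size works for the illustration
    next_token = input_ids.new_full((input_ids.shape[0], 1), 1)
    step = decoder(next_token, past_key_values=past_key_values)
    # the single-step hidden state matches the last position of a full re-run over the prefix
    # plus the new token, which is exactly what `create_and_check_decoder_model_past` asserts
    return step.last_hidden_state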
transformers/tests/models/trocr/test_modeling_trocr.py/0
{ "file_path": "transformers/tests/models/trocr/test_modeling_trocr.py", "repo_id": "transformers", "token_count": 3144 }
409
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import random import tempfile import unittest import numpy as np from datasets import Audio, load_dataset from transformers import UnivNetFeatureExtractor from transformers.testing_utils import check_json_file_has_correct_format, require_torch, slow from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch global_rng = random.Random() # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class UnivNetFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, sampling_rate=24000, padding_value=0.0, do_normalize=True, num_mel_bins=100, hop_length=256, win_length=1024, win_function="hann_window", filter_length=1024, max_length_s=10, fmin=0.0, fmax=12000, mel_floor=1e-9, center=False, compression_factor=1.0, compression_clip_val=1e-5, normalize_min=-11.512925148010254, normalize_max=2.3143386840820312, model_in_channels=64, pad_end_length=10, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.sampling_rate = sampling_rate self.padding_value = padding_value self.do_normalize = do_normalize self.num_mel_bins = num_mel_bins self.hop_length = hop_length self.win_length = win_length self.win_function = win_function self.filter_length = filter_length self.max_length_s = max_length_s self.fmin = fmin self.fmax = fmax self.mel_floor = mel_floor self.center = center self.compression_factor = compression_factor self.compression_clip_val = compression_clip_val self.normalize_min = normalize_min self.normalize_max = normalize_max self.model_in_channels = model_in_channels self.pad_end_length = pad_end_length def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "sampling_rate": self.sampling_rate, "padding_value": self.padding_value, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "filter_length": self.filter_length, "max_length_s": self.max_length_s, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "center": self.center, "compression_factor": self.compression_factor, "compression_clip_val": self.compression_clip_val, "normalize_min": self.normalize_min, "normalize_max": self.normalize_max, "model_in_channels": 
self.model_in_channels, "pad_end_length": self.pad_end_length, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size speech_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs class UnivNetFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = UnivNetFeatureExtractor def setUp(self): self.feat_extract_tester = UnivNetFeatureExtractionTester(self) # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_from_and_save_pretrained def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_to_json_file def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = feature_extractor( np_speech_inputs, padding="max_length", max_length=1600, return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3) # Note: for some reason I get a weird padding error when feature_size > 1 # self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size) # Note: we use the shape convention (batch_size, seq_len, num_mel_bins) self.assertTrue(input_features.shape[-1] == feature_extractor.num_mel_bins) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, 
return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test truncation required speech_inputs = [ floats_list((1, x))[0] for x in range((feature_extractor.num_max_samples - 100), (feature_extractor.num_max_samples + 500), 200) ] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] speech_inputs_truncated = [x[: feature_extractor.num_max_samples] for x in speech_inputs] np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated] encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_batched_unbatched_consistency(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = floats_list((1, 800))[0] np_speech_inputs = np.asarray(speech_inputs) # Test unbatched vs batched list encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor([speech_inputs], return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test np.ndarray vs List[np.ndarray] encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor([np_speech_inputs], return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test unbatched np.ndarray vs batched np.ndarray encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor( np.expand_dims(np_speech_inputs, axis=0), return_tensors="np" ).input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_generate_noise(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] features = feature_extractor(speech_inputs, return_noise=True) input_features = features.input_features noise_features = features.noise_sequence for spectrogram, noise in zip(input_features, noise_features): self.assertEqual(spectrogram.shape[0], noise.shape[0]) def test_pad_end(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] input_features1 = feature_extractor(speech_inputs, padding=False, 
pad_end=False).input_features input_features2 = feature_extractor(speech_inputs, padding=False, pad_end=True).input_features for spectrogram1, spectrogram2 in zip(input_features1, input_features2): self.assertEqual(spectrogram1.shape[0] + self.feat_extract_tester.pad_end_length, spectrogram2.shape[0]) def test_generate_noise_and_pad_end(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] features = feature_extractor(speech_inputs, padding=False, return_noise=True, pad_end=True) input_features = features.input_features noise_features = features.noise_sequence for spectrogram, noise in zip(input_features, noise_features): self.assertEqual(spectrogram.shape[0], noise.shape[0]) @require_torch def test_batch_decode(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) input_lengths = list(range(800, 1400, 200)) pad_samples = feature_extractor.pad_end_length * feature_extractor.hop_length output_features = { "waveforms": torch.tensor(floats_list((3, max(input_lengths) + pad_samples))), "waveform_lengths": torch.tensor(input_lengths), } waveforms = feature_extractor.batch_decode(**output_features) for input_length, waveform in zip(input_lengths, waveforms): self.assertTrue(len(waveform.shape) == 1, msg="Individual output waveforms should be 1D") self.assertEqual(waveform.shape[0], input_length) @require_torch # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.cast_column("audio", Audio(sampling_rate=self.feat_extract_tester.sampling_rate)) # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples], [x["sampling_rate"] for x in speech_samples] @slow @require_torch def test_integration(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ -5.0229, -6.1358, -5.8346, -5.4447, -5.6707, -5.8577, -5.0464, -5.0058, -5.6015, -5.6410, -5.4325, -5.6116, -5.3700, -5.7956, -5.3196, -5.3274, -5.9655, -5.6057, -5.8382, -5.9602, -5.9005, -5.9123, -5.7669, -6.1441, -5.5168, -5.1405, -5.3927, -6.0032, -5.5784, -5.3728 ], ) # fmt: on input_speech, sr = self._load_datasamples(1) feature_extractor = UnivNetFeatureExtractor() input_features = feature_extractor(input_speech, sampling_rate=sr[0], return_tensors="pt").input_features self.assertEqual(input_features.shape, (1, 548, 100)) input_features_mean = torch.mean(input_features) input_features_stddev = torch.std(input_features) EXPECTED_MEAN = torch.tensor(-6.18862009) EXPECTED_STDDEV = torch.tensor(2.80845642) torch.testing.assert_close(input_features_mean, EXPECTED_MEAN, atol=5e-5, rtol=5e-6) 
torch.testing.assert_close(input_features_stddev, EXPECTED_STDDEV) torch.testing.assert_close(input_features[0, :30, 0], EXPECTED_INPUT_FEATURES, atol=1e-4, rtol=1e-5)
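
# A minimal usage sketch of the flow the tests above exercise (not from the upstream test
# file): the audio length and sampling rate below are assumptions, and the dummy audio is
# random noise rather than real speech.
def _example_univnet_feature_extraction():
    import numpy as np

    from transformers import UnivNetFeatureExtractor

    feature_extractor = UnivNetFeatureExtractor()
    audio = np.random.randn(24_000).astype(np.float32)  # roughly one second of dummy audio (assumed rate)
    features = feature_extractor(
        audio,
        sampling_rate=feature_extractor.sampling_rate,
        return_noise=True,
        return_tensors="np",
    )
    # input_features follows the (batch_size, seq_len, num_mel_bins) convention noted above;
    # noise_sequence shares the same number of frames, as checked in test_generate_noise.
    return features.input_features, features.noise_sequence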
transformers/tests/models/univnet/test_feature_extraction_univnet.py/0
{ "file_path": "transformers/tests/models/univnet/test_feature_extraction_univnet.py", "repo_id": "transformers", "token_count": 7246 }
410
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ViTMAE model. """ import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class ViTMAEModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.mask_ratio = mask_ratio self.scope = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, 
mask_ratio=self.mask_ratio, decoder_hidden_size=self.hidden_size, decoder_intermediate_size=self.intermediate_size, decoder_num_attention_heads=self.num_attention_heads, decoder_num_hidden_layers=self.num_hidden_layers, ) def create_and_check_model(self, config, pixel_values, labels): model = ViTMAEModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_pretraining(self, config, pixel_values, labels): model = ViTMAEForPreTraining(config) model.to(torch_device) model.eval() result = model(pixel_values) num_patches = (self.image_size // self.patch_size) ** 2 expected_num_channels = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) # test greyscale images config.num_channels = 1 model = ViTMAEForPreTraining(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) expected_num_channels = self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ViTMAEModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) # overwrite from common since ViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict): # make masks reproducible np.random.seed(2) num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2) noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) pt_noise = torch.from_numpy(noise) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument pt_inputs_dict["noise"] = pt_noise super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): after_outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # Make sure we don't have nans out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def test_determinism(self): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def test_save_load_fast_init_from_base(self): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""") def test_model_outputs_equivalence(self): pass @slow def test_model_from_pretrained(self): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ViTMAEModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViTMAEModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None @slow def test_inference_for_pretraining(self): # make random mask reproducible across the PT and TF model np.random.seed(2) model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) vit_mae_config = ViTMAEConfig() num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) noise = np.random.uniform(size=(1, num_patches)) # forward pass with torch.no_grad(): outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device)) # verify the logits expected_shape = torch.Size((1, 196, 768)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
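
# A small illustrative helper (not part of the upstream test file) restating the sequence-length
# arithmetic used by ViTMAEModelTester above; the default-sized argument values are assumptions.
def _example_vitmae_visible_seq_length(image_size=224, patch_size=16, mask_ratio=0.75):
    import math

    num_patches = (image_size // patch_size) ** 2  # 196 for the assumed defaults
    # +1 accounts for the [CLS] token; ceil matches the rounding used in ViTMAEModelTester
    return int(math.ceil((1 - mask_ratio) * (num_patches + 1)))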
transformers/tests/models/vit_mae/test_modeling_vit_mae.py/0
{ "file_path": "transformers/tests/models/vit_mae/test_modeling_vit_mae.py", "repo_id": "transformers", "token_count": 5323 }
411
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math import multiprocessing import traceback import unittest import numpy as np from datasets import load_dataset from transformers import Wav2Vec2Config, is_flax_available from transformers.testing_utils import ( CaptureLogger, is_flaky, is_librosa_available, is_pt_flax_cross_test, is_pyctcdecode_available, require_flax, require_librosa, require_pyctcdecode, require_soundfile, run_test_in_subprocess, slow, ) from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp import optax from flax.traverse_util import flatten_dict from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Processor from transformers.models.wav2vec2.modeling_flax_wav2vec2 import ( FlaxWav2Vec2ForCTC, FlaxWav2Vec2ForPreTraining, FlaxWav2Vec2GumbelVectorQuantizer, FlaxWav2Vec2Model, _compute_mask_indices, _sample_negative_indices, ) if is_pyctcdecode_available(): import pyctcdecode.decoder from transformers import Wav2Vec2ProcessorWithLM from transformers.models.wav2vec2_with_lm import processing_wav2vec2_with_lm if is_librosa_available(): import librosa def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = librosa.resample(sample["audio"]["array"], 48_000, 16_000) model = FlaxWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="np").input_values logits = model(input_values).logits # use a spawn pool, which should trigger a warning if different than fork with CaptureLogger(pyctcdecode.decoder.logger) as cl, multiprocessing.get_context("spawn").Pool(1) as pool: transcription = processor.batch_decode(np.array(logits), pool).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") # force batch_decode to internally create a spawn pool, which should trigger a warning if different than fork multiprocessing.set_start_method("spawn", force=True) with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl: transcription = processor.batch_decode(np.array(logits)).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class FlaxWav2Vec2ModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=24, 
feat_extract_norm="layer", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, # this is most likely not correctly set yet intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = Wav2Vec2Config( do_stable_layer_norm=self.do_stable_layer_norm, hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, ) return config, input_values, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_values, attention_mask = config_and_inputs inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_flax class FlaxWav2Vec2ModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = ( (FlaxWav2Vec2Model, FlaxWav2Vec2ForCTC, FlaxWav2Vec2ForPreTraining) if is_flax_available() else () ) def setUp(self): self.model_tester = FlaxWav2Vec2ModelTester(self) def test_train(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] model = FlaxWav2Vec2ForPreTraining(config) features_shape = ( input_values.shape[0], 
model._get_feat_extract_output_lengths(np.array(input_values.shape[1])), ) batch_size, sequence_length = features_shape[:2] mask_prob = 0.5 mask_length = 4 mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) dropout_rng, gumbel_rng = jax.random.split(jax.random.PRNGKey(0)) output = model( input_values, attention_mask=attention_mask, mask_time_indices=mask_time_indices, train=True, dropout_rng=dropout_rng, gumbel_rng=gumbel_rng, )[0] self.assertTrue(output.shape == (batch_size, sequence_length, model.config.proj_codevector_dim)) # overwrite because of `input_values` def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values", "attention_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) # overwrite because of `input_values` def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_values, attention_mask=None, **kwargs): return model(input_values=input_values, attention_mask=attention_mask, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_freeze_feature_encoder(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] model = FlaxWav2Vec2ForPreTraining(config) params = model.params # dummy loss function def compute_loss( params, input_values, attention_mask, freeze_feature_encoder: bool = False, epsilon: float = 1e-8 ): outputs = model( input_values, attention_mask=attention_mask, freeze_feature_encoder=freeze_feature_encoder, params=params, ) # compute cosine similarity of projected and projected_quantized states cosine_sim = optax.cosine_similarity( outputs.projected_states, outputs.projected_quantized_states, epsilon=epsilon ) loss = cosine_sim.sum() return loss, outputs.to_tuple() # transform the loss function to get the gradients grad_fn = jax.value_and_grad(compute_loss, has_aux=True) # compute loss, outputs and gradients for unfrozen model (loss, outputs), grads = grad_fn(params, input_values, attention_mask, freeze_feature_encoder=False) # compare to loss, outputs and gradients for frozen model (loss_frozen, outputs_frozen), grads_frozen = grad_fn( params, input_values, attention_mask, freeze_feature_encoder=True ) # ensure that the outputs and losses remain precisely equal for output, output_frozen in zip(outputs, outputs_frozen): self.assertTrue((output == output_frozen).all()) self.assertEqual(loss, loss_frozen) grads = flatten_dict(grads) grads_frozen = flatten_dict(grads_frozen) # ensure that the dicts of gradients contain the same keys self.assertEqual(grads.keys(), 
grads_frozen.keys()) # ensure that the gradients of the feature extractor layers are precisely zero when frozen and contain non-zero entries when unfrozen feature_extractor_grads = tuple(grads[k] for k in grads if "feature_extractor" in k) feature_extractor_grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" in k) for feature_extractor_grad, feature_extractor_grad_frozen in zip( feature_extractor_grads, feature_extractor_grads_frozen ): self.assertTrue((feature_extractor_grad_frozen == 0.0).all()) self.assertTrue((feature_extractor_grad > 0.0).any()) # ensure that the gradients of all unfrozen layers remain equal, i.e. all layers excluding the frozen 'feature_extractor' grads = tuple(grads[k] for k in grads if "feature_extractor" not in k) grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" not in k) for grad, grad_frozen in zip(grads, grads_frozen): self.assertTrue((grad == grad_frozen).all()) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", from_pt=True) outputs = model(np.ones((1, 1024), dtype="f4")) self.assertIsNotNone(outputs) @is_pt_flax_cross_test @is_flaky() def test_equivalence_pt_to_flax(self): super().test_equivalence_pt_to_flax() @require_flax class FlaxWav2Vec2UtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) def test_compute_mask_indices_attn_mask_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 attention_mask = np.ones((batch_size, sequence_length), dtype=np.int32) attention_mask[:2, sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0) def test_compute_perplexity(self): probs = np.arange(100).reshape(2, 5, 10) / 100 ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs) self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3) # mask half of the input mask = np.ones((2,), dtype=bool) mask[0] = 0 ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask) self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3) def test_sample_negatives(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape( sequence_length, hidden_size ) # each value in vector consits of same value features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size)) negative_indices = _sample_negative_indices(features.shape, num_negatives) features = features.reshape(-1, hidden_size) # BTC => (BxT)C # take negative vectors from sampled 
indices sampled_negatives = features[negative_indices.reshape(-1)] negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose( 2, 0, 1, 3 ) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors # => this means that `unique()` yields a single value for `hidden_size` dim self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1)) def test_sample_negatives_with_attn_mask(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape( sequence_length, hidden_size ) # each value in vector consits of same value # second half of last input tensor is padded attention_mask = np.ones((batch_size, sequence_length), dtype=np.int8) attention_mask[-1, sequence_length // 2 :] = 0 forbidden_indices = ( np.arange(sequence_length // 2, sequence_length, dtype=np.int32) + (batch_size - 1) * sequence_length ).tolist() features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size)) negative_indices = _sample_negative_indices(features.shape, num_negatives, attention_mask=attention_mask) # make sure that no padding tokens are sampled self.assertTrue(all(idx not in negative_indices for idx in forbidden_indices)) features = features.reshape(-1, hidden_size) # BTC => (BxT)C # take negative vectors from sampled indices sampled_negatives = features[negative_indices.reshape(-1)] negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose( 2, 0, 1, 3 ) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0) # make sure that full vectors are sampled and not just slices of vectors # => this means that `unique()` yields a single value for `hidden_size` dim self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1)) @require_flax @require_soundfile @slow class FlaxWav2Vec2ModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_ctc_robust_batched(self): model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", from_pt=True) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True) input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="np", padding=True) input_values = inputs.input_values attention_mask = inputs.attention_mask logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = jnp.argmax(logits, axis=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's 
body trickling into the tight loin cloth that was the only garment he wore", "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around" " him with the thousands of spectators were trivialities not worth thinking about", "his instant panic was followed by a small sharp blow high on his chest", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_pretrained(self): model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60", from_pt=True) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-large-lv60", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="np", padding=True) features_shape = ( inputs_dict["input_values"].shape[0], model._get_feat_extract_output_lengths(np.array(inputs_dict["input_values"].shape[1])), ) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) outputs = model( inputs_dict.input_values, attention_mask=inputs_dict.attention_mask, mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim = optax.cosine_similarity( outputs.projected_states, outputs.projected_quantized_states, epsilon=1e-8 ) # retrieve cosine sim of masked features cosine_sim_masked = cosine_sim[mask_time_indices] # ... now compare to randomly initialized model config = Wav2Vec2Config.from_pretrained("facebook/wav2vec2-large-lv60") model_rand = FlaxWav2Vec2ForPreTraining(config) outputs_rand = model_rand( inputs_dict.input_values, attention_mask=inputs_dict.attention_mask, mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim_rand = optax.cosine_similarity( outputs_rand.projected_states, outputs_rand.projected_quantized_states ) # retrieve cosine sim of masked features cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices] # a pretrained wav2vec2 model has learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states > 0.5 # a random wav2vec2 model has not learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states is very likely < 0.1 self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0) @require_pyctcdecode @require_librosa def test_wav2vec2_with_lm(self): ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = librosa.resample(sample["audio"]["array"], 48_000, 16_000) model = FlaxWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="np").input_values logits = model(input_values).logits transcription = processor.batch_decode(np.array(logits)).text self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") @require_pyctcdecode @require_librosa def test_wav2vec2_with_lm_pool(self): ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = librosa.resample(sample["audio"]["array"], 48_000, 16_000) model = FlaxWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") processor = 
Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="np").input_values logits = model(input_values).logits # test user-managed pool with multiprocessing.get_context("fork").Pool(2) as pool: transcription = processor.batch_decode(np.array(logits), pool).text self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") # user-managed pool + num_processes should trigger a warning with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl, multiprocessing.get_context("fork").Pool( 2 ) as pool: transcription = processor.batch_decode(np.array(logits), pool, num_processes=2).text self.assertIn("num_process", cl.out) self.assertIn("it will be ignored", cl.out) self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") @require_pyctcdecode @require_librosa def test_wav2vec2_with_lm_invalid_pool(self): run_test_in_subprocess(test_case=self, target_func=_test_wav2vec2_with_lm_invalid_pool, inputs=None)
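
# An illustrative sketch (not part of the upstream test file) of the masking helper exercised by
# FlaxWav2Vec2UtilsTest above; it requires a Flax install, and the shape values are arbitrary.
def _example_compute_mask_indices():
    from transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices

    # arguments are (shape, mask_prob, mask_length), as in the tests above
    mask = _compute_mask_indices((2, 60), 0.5, 1)
    # with mask_length == 1 every row masks exactly mask_prob * sequence_length positions,
    # which is the property asserted in test_compute_mask_indices
    return mask.sum(axis=-1)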
transformers/tests/models/wav2vec2/test_modeling_flax_wav2vec2.py/0
{ "file_path": "transformers/tests/models/wav2vec2/test_modeling_flax_wav2vec2.py", "repo_id": "transformers", "token_count": 11180 }
412
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class XLMModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_lengths = use_input_lengths self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.vocab_size = vocab_size self.n_special = n_special self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.summary_type = summary_type self.use_proj = use_proj self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length]) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) 
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2).float() choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def get_config(self): return XLMConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, ) def create_and_check_xlm_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, lengths=input_lengths, langs=token_type_ids) result = model(input_ids, langs=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_xlm_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMWithLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_xlm_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMForQuestionAnsweringSimple(config) model.to(torch_device) model.eval() outputs = model(input_ids) outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) result = outputs self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_xlm_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, ) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, ) (total_loss,) = result_with_labels.to_tuple() result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) (total_loss,) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top)) 
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) def create_and_check_xlm_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, labels=sequence_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_xlm_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = XLMForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_xlm_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = XLMForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) all_generative_model_classes = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable pipeline_model_mapping = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, 
pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False # XLM has 2 QA models -> need to manually set the correct labels for one of them here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = XLMModelTester(self) self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_xlm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*config_and_inputs) def test_xlm_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs) def test_xlm_simple_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs) def test_xlm_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*config_and_inputs) def test_xlm_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs) def test_xlm_token_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs) def test_xlm_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs) def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): # adds PAD dummy token tgt_len = min_length + idx + 1 src_len = min_length + idx + 1 expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), 
(max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): # adds PAD dummy token seq_len = min_length + idx + 1 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) pass @slow def test_model_from_pretrained(self): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = XLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class XLMModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_xlm_mlm_en_2048(self): model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048") model.to(torch_device) input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device) # the president expected_output_ids = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
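
# An illustrative sketch (not part of the upstream test file) mirroring create_and_check_xlm_model
# above with a tiny randomly initialised model; every size below is an arbitrary assumption.
def _example_tiny_xlm_forward():
    import torch

    from transformers import XLMConfig, XLMModel

    config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4, n_langs=2)
    model = XLMModel(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 7))
    langs = torch.zeros_like(input_ids)  # language ids must be < n_langs
    with torch.no_grad():
        outputs = model(input_ids, langs=langs)
    return outputs.last_hidden_state.shape  # (batch_size, seq_length, emb_dim)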
transformers/tests/models/xlm/test_modeling_xlm.py/0
{ "file_path": "transformers/tests/models/xlm/test_modeling_xlm.py", "repo_id": "transformers", "token_count": 9334 }
413
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from huggingface_hub.utils import insecure_hashlib from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class DepthEstimationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def run_pipeline_test(self, depth_estimator, examples): outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png") self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs) import datasets # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") outputs = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["image"], # LA dataset[1]["image"], # L dataset[2]["image"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, ], outputs, ) @require_tf @unittest.skip("Depth estimation is not implemented in TF") def test_small_model_tf(self): pass @slow @require_torch def test_large_model_pt(self): model_id = "Intel/dpt-large" depth_estimator = pipeline("depth-estimation", model=model_id) outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg") outputs["depth"] = hashimage(outputs["depth"]) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662) @require_torch def test_small_model_pt(self): # This is highly irregular to have no small tests. 
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
transformers/tests/pipelines/test_pipelines_depth_estimation.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_depth_estimation.py", "repo_id": "transformers", "token_count": 1794 }
414
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import ( MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, AutoProcessor, TextToAudioPipeline, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_torch_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.trainer_utils import set_seed from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class TextToAudioPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING # for now only test text_to_waveform and not text_to_spectrogram @slow @require_torch def test_small_musicgen_pt(self): music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": False, "max_new_tokens": 250, } outputs = music_generator("This is a test", forward_params=forward_params) self.assertEqual({"audio": ANY(np.ndarray), "sampling_rate": 32000}, outputs) # test two examples side-by-side outputs = music_generator(["This is a test", "This is a second test"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = music_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2 ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch def test_small_bark_pt(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt") forward_params = { # Using `do_sample=False` to force deterministic output "do_sample": False, "semantic_max_new_tokens": 100, } outputs = speech_generator("This is a test", forward_params=forward_params) self.assertEqual( {"audio": ANY(np.ndarray), "sampling_rate": 24000}, outputs, ) # test two examples side-by-side outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test other generation strategy forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, "semantic_num_return_sequences": 2, } outputs = speech_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # test using a speaker embedding processor = AutoProcessor.from_pretrained("suno/bark-small") temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2, ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch_accelerator def 
test_conversion_additional_tensor(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt", device=torch_device) processor = AutoProcessor.from_pretrained("suno/bark-small") forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, } # atm, must do to stay coherent with BarkProcessor preprocess_params = { "max_length": 256, "add_special_tokens": False, "return_attention_mask": True, "return_token_type_ids": False, "padding": "max_length", } outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params, ) temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt # history_prompt is a torch.Tensor passed as a forward_param # if generation is successful, it means that it was passed to the right device outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params ) self.assertEqual( {"audio": ANY(np.ndarray), "sampling_rate": 24000}, outputs, ) @slow @require_torch def test_vits_model_pt(self): speech_generator = pipeline(task="text-to-audio", model="facebook/mms-tts-eng", framework="pt") outputs = speech_generator("This is a test") self.assertEqual(outputs["sampling_rate"], 16000) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # test two examples side-by-side outputs = speech_generator(["This is a test", "This is a second test"]) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = speech_generator(["This is a test", "This is a second test"], batch_size=2) self.assertEqual(ANY(np.ndarray), outputs[0]["audio"]) @slow @require_torch def test_forward_model_kwargs(self): # use vits - a forward model speech_generator = pipeline(task="text-to-audio", model="kakao-enterprise/vits-vctk", framework="pt") # for reproducibility set_seed(555) outputs = speech_generator("This is a test", forward_params={"speaker_id": 5}) audio = outputs["audio"] with self.assertRaises(TypeError): # assert error if generate parameter outputs = speech_generator("This is a test", forward_params={"speaker_id": 5, "do_sample": True}) forward_params = {"speaker_id": 5} generate_kwargs = {"do_sample": True} with self.assertRaises(ValueError): # assert error if generate_kwargs with forward-only models outputs = speech_generator( "This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs ) self.assertTrue(np.abs(outputs["audio"] - audio).max() < 1e-5) @slow @require_torch def test_generative_model_kwargs(self): # use musicgen - a generative model music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": True, "max_new_tokens": 250, } # for reproducibility set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # make sure generate kwargs get priority over forward params forward_params = { "do_sample": False, "max_new_tokens": 250, } generate_kwargs = {"do_sample": True} # for reproducibility set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs) self.assertListEqual(outputs["audio"].tolist(), audio.tolist()) def get_test_pipeline(self, model, tokenizer, processor): speech_generator = TextToAudioPipeline(model=model, 
tokenizer=tokenizer)
        return speech_generator, ["This is a test", "Another test"]

    def run_pipeline_test(self, speech_generator, _):
        outputs = speech_generator("This is a test")
        self.assertEqual(ANY(np.ndarray), outputs["audio"])

        forward_params = (
            {"num_return_sequences": 2, "do_sample": True} if speech_generator.model.can_generate() else {}
        )
        outputs = speech_generator(["This is great !", "Something else"], forward_params=forward_params)
        audio = [output["audio"] for output in outputs]
        self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
transformers/tests/pipelines/test_pipelines_text_to_audio.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_text_to_audio.py", "repo_id": "transformers", "token_count": 3815 }
415
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import pytest from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig from transformers.testing_utils import ( is_torch_available, require_accelerate, require_auto_gptq, require_optimum, require_torch_gpu, require_torch_multi_gpu, slow, ) if is_torch_available(): import torch class GPTQConfigTest(unittest.TestCase): def test_bits(self): with self.assertRaises(ValueError): GPTQConfig(bits="") GPTQConfig(bits=1) GPTQConfig(bits=2) GPTQConfig(bits=4) def test_dataset(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, dataset="auto_gpt") GPTQConfig(bits=2, dataset="c4") GPTQConfig(bits=2, dataset="ptb-new") def test_damp_percent(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, damp_percent=10) GPTQConfig(bits=2, damp_percent=-1) GPTQConfig(bits=2, damp_percent="0") GPTQConfig(bits=2, damp_percent=0.01) def test_to_dict(self): quantization_config = GPTQConfig(bits=2) quantization_config.to_dict() def test_from_dict(self): dict = {"bits": 2} quantization_config = GPTQConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) @require_optimum def test_optimum_config(self): from optimum.gptq import GPTQQuantizer config = GPTQConfig(bits=2) optimum_config = GPTQQuantizer.from_dict(config.to_dict_optimum()) self.assertEqual(optimum_config.bits, config.bits) new_config = GPTQConfig.from_dict_optimum(optimum_config.to_dict()) self.assertEqual(optimum_config.bits, new_config.bits) @slow @require_optimum @require_auto_gptq @require_torch_gpu class GPTQTest(unittest.TestCase): model_name = "bigscience/bloom-560m" input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a professional photographer and I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a student in the University of") EXPECTED_OUTPUTS.add("Hello my name is John and I am a very good looking man.") EXPECTED_OUTPUTS.add("Hello my name is Alyson, I am a student in the") EXPECTED_OUTPUTS.add("Hello my name is Alyson and I am a very sweet,") # this seems a little small considering that we are doing 4bit quant but we have a small model and ww don't quantize the embeddings EXPECTED_RELATIVE_DIFFERENCE = 1.664253062 bits = 4 group_size = 128 desc_act = False use_exllama = False dataset = [ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm." 
] device_map = None # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.model_fp16 = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map ) cls.mem_fp16 = cls.model_fp16.get_memory_footprint() cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) quantization_config = GPTQConfig( bits=cls.bits, dataset=cls.dataset, tokenizer=cls.tokenizer, group_size=cls.group_size, desc_act=cls.desc_act, use_exllama=cls.use_exllama, ) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map, quantization_config=quantization_config, ) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model """ mem_quantized = self.quantized_model.get_memory_footprint() self.assertAlmostEqual(self.mem_fp16 / mem_quantized, self.EXPECTED_RELATIVE_DIFFERENCE) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after quantization will throw an error. Checks also if other models are casted correctly. """ # This should work if self.device_map is None: _ = self.quantized_model.to(0) with self.assertRaises(ValueError): # Tries with a `dtype`` self.quantized_model.to(torch.float16) def test_original_dtype(self): r""" A simple test to check if the model succesfully stores the original dtype """ self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16) def test_quantized_layers_class(self): """ Simple test to check if the model conversion has been done correctly by checking on the class type of the linear layers of the converted models """ from auto_gptq.utils.import_utils import dynamically_import_QuantLinear QuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=self.desc_act, group_size=self.group_size, bits=self.bits, disable_exllama=not self.use_exllama, disable_exllamav2=True, ) self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear) def check_inference_correctness(self, model): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. 
""" # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def check_quantized_layers_type(self, model, value): self.assertTrue(model.transformer.h[0].mlp.dense_4h_to_h.QUANT_TYPE == value) def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ if self.device_map is None: self.check_inference_correctness(self.quantized_model.to(0)) else: self.check_inference_correctness(self.quantized_model) def test_serialization(self): """ Test the serialization of the model and the loading of the quantized weights works """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, quantization_config=GPTQConfig(use_exllama=False, bits=4) ).to(0) self.check_quantized_layers_type(quantized_model_from_saved, "cuda-old") else: # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map={"": 0}) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate def test_serialization_big_model_inference(self): """ Test the serialization of the model and the loading of the quantized weights with big model inference """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto") self.check_inference_correctness(quantized_model_from_saved) def test_change_loading_attributes(self): """ Test the serialization of the model and the loading of the quantized weights works with another config file """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: self.check_quantized_layers_type(self.quantized_model, "cuda-old") # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map={"": 0} ) self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMap(GPTQTest): device_map = "auto" @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapExllama(GPTQTest): device_map = "auto" use_exllama = True @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestActOrderExllama(unittest.TestCase): """ Test GPTQ model with exllama kernel and desc_act=True (also known as act-order). More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello, how are you ? 
I'm doing good, thanks for asking.") # 4bit + act_order + 128g model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ" input_text = "Hello, how are you ?" @classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllama") def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) def test_max_input_length(self): """ Test if the max_input_length works. It modifies the maximum input length that of the model that runs with exllama backend. """ prompt = "I am in Paris and" * 1000 inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] > 4028) with self.assertRaises(RuntimeError) as cm: self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) self.assertTrue("temp_state buffer is too small" in str(cm.exception)) prompt = "I am in Paris and" inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] < 4028) self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestExllamaV2(unittest.TestCase): """ Test GPTQ model with exllamav2 kernel and desc_act=True (also known as act-order). More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello, how are you ? I'm doing good, thanks for asking.") # 4bit + act_order + 128g model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ" input_text = "Hello, how are you ?" 
@classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, exllama_config={"version": 2}) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllamav2") def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality(self): """ Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) # fail when run all together @pytest.mark.skip @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapCPUOffload(GPTQTest): device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 1, "transformer.h.11": 1, "transformer.h.12": 1, "transformer.h.13": 1, "transformer.h.14": 1, "transformer.h.15": 1, "transformer.h.16": 1, "transformer.h.17": 0, "transformer.h.18": "cpu", "transformer.h.19": "cpu", "transformer.h.20": "cpu", "transformer.h.21": "cpu", "transformer.h.22": "cpu", "transformer.h.23": 1, "transformer.ln_f": 0, }
transformers/tests/quantization/gptq/test_gptq.py/0
{ "file_path": "transformers/tests/quantization/gptq/test_gptq.py", "repo_id": "transformers", "token_count": 7230 }
416
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    # @parameterized.expand([(2,), (4,),])
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump test results into a json file to share in the PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
transformers/tests/sagemaker/test_multi_node_model_parallel.py/0
{ "file_path": "transformers/tests/sagemaker/test_multi_node_model_parallel.py", "repo_id": "transformers", "token_count": 2097 }
417
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import glob import json import os import os.path import sys import tempfile import unittest import unittest.mock as mock import uuid from pathlib import Path import requests from huggingface_hub import HfApi, HfFolder, delete_repo from huggingface_hub.file_download import http_get from pytest import mark from requests.exceptions import HTTPError from transformers import ( AutoConfig, AutoModel, AutoModelForSequenceClassification, OwlViTForObjectDetection, PretrainedConfig, is_torch_available, logging, ) from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, LoggingLevel, TestCasePlus, is_staging_test, require_accelerate, require_flax, require_safetensors, require_tf, require_torch, require_torch_accelerator, require_torch_gpu, require_torch_multi_accelerator, require_usr_bin_time, slow, torch_device, ) from transformers.utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from transformers.utils.import_utils import ( is_flash_attn_2_available, is_flax_available, is_tf_available, is_torch_sdpa_available, is_torchdynamo_available, ) sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig, NoSuperInitConfig # noqa E402 if is_torch_available(): import torch from safetensors.torch import save_file as safe_save_file from test_module.custom_modeling import CustomModel, NoSuperInitModel from torch import nn from transformers import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, AutoModelForCausalLM, AutoTokenizer, BertConfig, BertModel, CLIPTextModel, PreTrainedModel, T5Config, T5ForConditionalGeneration, ) from transformers.modeling_attn_mask_utils import ( AttentionMaskConverter, _create_4d_causal_attention_mask, _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask, ) from transformers.modeling_utils import shard_checkpoint # Fake pretrained models for tests class BaseModel(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def __init__(self, config): super().__init__(config) self.linear = nn.Linear(5, 5) self.linear_2 = nn.Linear(5, 5) def forward(self, x): return self.linear_2(self.linear(x)) class BaseModelWithTiedWeights(PreTrainedModel): config_class = PretrainedConfig def __init__(self, config): super().__init__(config) self.linear = nn.Linear(5, 5) self.linear_2 = nn.Linear(5, 5) def forward(self, x): return self.linear_2(self.linear(x)) def tie_weights(self): self.linear_2.weight = self.linear.weight class ModelWithHead(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) self.base = BaseModel(config) # linear is a common name between Base and Head on purpose. 
self.linear = nn.Linear(5, 5) self.linear2 = nn.Linear(5, 5) def forward(self, x): return self.linear2(self.linear(self.base(x))) class ModelWithHeadAndTiedWeights(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) self.base = BaseModel(config) self.decoder = nn.Linear(5, 5) def forward(self, x): return self.decoder(self.base(x)) def tie_weights(self): self.decoder.weight = self.base.linear.weight class Prepare4dCausalAttentionMaskModel(nn.Module): def forward(self, inputs_embeds): batch_size, seq_length, _ = inputs_embeds.shape past_key_values_length = 4 attention_mask = _prepare_4d_causal_attention_mask( None, (batch_size, seq_length), inputs_embeds, past_key_values_length ) return attention_mask class Create4dCausalAttentionMaskModel(nn.Module): def forward(self, inputs_embeds): batch_size, seq_length, _ = inputs_embeds.shape past_key_values_length = 4 attention_mask = _create_4d_causal_attention_mask( (batch_size, seq_length), dtype=inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) return attention_mask class Prepare4dAttentionMaskModel(nn.Module): def forward(self, mask, inputs_embeds): attention_mask = _prepare_4d_attention_mask(mask, dtype=inputs_embeds.dtype) return attention_mask if is_flax_available(): from transformers import FlaxBertModel if is_tf_available(): from transformers import TFBertModel TINY_T5 = "patrickvonplaten/t5-tiny-random" TINY_BERT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-bert-for-token-classification" TINY_MISTRAL = "hf-internal-testing/tiny-random-MistralForCausalLM" def check_models_equal(model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal @require_torch class ModelUtilsTest(TestCasePlus): @slow def test_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = BertConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, PretrainedConfig) model = BertModel.from_pretrained(model_name) model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, PreTrainedModel) self.assertEqual(len(loading_info["missing_keys"]), 0) self.assertEqual(len(loading_info["unexpected_keys"]), 8) self.assertEqual(len(loading_info["mismatched_keys"]), 0) self.assertEqual(len(loading_info["error_msgs"]), 0) config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) # Not sure this is the intended behavior. 
TODO fix Lysandre & Thom config.name_or_path = model_name model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) self.assertEqual(model.config.output_hidden_states, True) self.assertEqual(model.config, config) def test_model_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder)) with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_subfolder_sharded(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_hub_subfolder(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(OSError): _ = BertModel.from_pretrained(model_id) model = BertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_hub_subfolder_sharded(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(OSError): _ = BertModel.from_pretrained(model_id) model = BertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_with_different_pretrained_model_name(self): model = T5ForConditionalGeneration.from_pretrained(TINY_T5) self.assertIsNotNone(model) logger = logging.get_logger("transformers.configuration_utils") with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: BertModel.from_pretrained(TINY_T5) self.assertTrue("You are using a model of type t5 to instantiate a model of type bert" in cl.out) @require_accelerate def test_model_from_pretrained_with_none_quantization_config(self): # Needs a device_map for to enter the low_cpu_mem branch. We also load AutoModelForSequenceClassification # deliberately to enter the missing keys branch. model = AutoModelForSequenceClassification.from_pretrained( TINY_MISTRAL, device_map="auto", quantization_config=None ) self.assertIsNotNone(model) def test_model_from_config_torch_dtype(self): # test that the model can be instantiated with dtype of user's choice - as long as it's a # float dtype. To make it happen config.torch_dtype needs to be set before instantiating the # model from the config object. config = T5Config.from_pretrained(TINY_T5) model = AutoModel.from_config(config) # XXX: isn't supported # model = T5ForConditionalGeneration.from_config(config) self.assertEqual(model.dtype, torch.float32) model = AutoModel.from_config(config, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # torch.set_default_dtype() supports only float dtypes, so will fail with non-float type with self.assertRaises(ValueError): model = AutoModel.from_config(config, torch_dtype=torch.int64) def test_model_from_pretrained_torch_dtype(self): # test that the model can be instantiated with dtype of either # 1. 
explicit from_pretrained's torch_dtype argument # 2. via autodiscovery by looking at model weights (torch_dtype="auto") # so if a model.half() was saved, we want it to be instantiated as such. # # test an explicit model class, but also AutoModel separately as the latter goes through a different code path model_path = self.get_auto_remove_tmp_dir() # baseline - we know TINY_T5 is fp32 model model = T5ForConditionalGeneration.from_pretrained(TINY_T5) self.assertEqual(model.dtype, torch.float32) def remove_torch_dtype(model_path): file = f"{model_path}/config.json" with open(file, "r", encoding="utf-8") as f: s = json.load(f) s.pop("torch_dtype") with open(file, "w", encoding="utf-8") as f: json.dump(s, f) # test the default fp32 save_pretrained => from_pretrained cycle model.save_pretrained(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path) self.assertEqual(model.dtype, torch.float32) # 1. test torch_dtype="auto" via `config.torch_dtype` model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) # 2. test torch_dtype="auto" via auto-derivation # now remove the torch_dtype entry from config.json and try "auto" again which should # perform auto-derivation from weights remove_torch_dtype(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) # test forced loading in fp16 (even though the weights are in fp32) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test fp16 save_pretrained, loaded with auto-detection model = model.half() model.save_pretrained(model_path) # 1. test torch_dtype="auto" via `config.torch_dtype` model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.config.torch_dtype, torch.float16) self.assertEqual(model.dtype, torch.float16) # tests `config.torch_dtype` saving with open(f"{model_path}/config.json") as f: config_dict = json.load(f) self.assertEqual(config_dict["torch_dtype"], "float16") # 2. test torch_dtype="auto" via auto-derivation # now same with using config info remove_torch_dtype(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float16) # 3. 
now retest that AutoModel behaves the same wrt torch_dtype="auto" as T5ForConditionalGeneration model = AutoModel.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float16) # test fp16 save_pretrained, loaded with the explicit fp16 model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test AutoModel separately as it goes through a different path # test auto-detection - as currently TINY_T5 doesn't have torch_dtype entry model = AutoModel.from_pretrained(TINY_T5, torch_dtype="auto") # test that the config object didn't get polluted with torch_dtype="auto" # there was a bug that after this call we ended up with config.torch_dtype=="auto" self.assertNotEqual(model.config.torch_dtype, "auto") # now test the outcome self.assertEqual(model.dtype, torch.float32) model = AutoModel.from_pretrained(TINY_T5, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test model whose first param is not of a floating type, but int model = AutoModel.from_pretrained(TINY_BERT_FOR_TOKEN_CLASSIFICATION, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) def test_no_super_init_config_and_model(self): config = NoSuperInitConfig(attribute=32) model = NoSuperInitModel(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = NoSuperInitModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_shard_checkpoint(self): # This is the model we will use, total size 340,000 bytes. model = torch.nn.Sequential( torch.nn.Linear(100, 200, bias=False), # size 80,000 torch.nn.Linear(200, 200, bias=False), # size 160,000 torch.nn.Linear(200, 100, bias=False), # size 80,000 torch.nn.Linear(100, 50, bias=False), # size 20,000 ) state_dict = model.state_dict() with self.subTest("No shard when max size is bigger than model size"): shards, index = shard_checkpoint(state_dict) self.assertIsNone(index) self.assertDictEqual(shards, {WEIGHTS_NAME: state_dict}) with self.subTest("Test sharding, no weights bigger than max size"): shards, index = shard_checkpoint(state_dict, max_shard_size="300kB") # Split is first two layers then last two. self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "0.weight": "pytorch_model-00001-of-00002.bin", "1.weight": "pytorch_model-00001-of-00002.bin", "2.weight": "pytorch_model-00002-of-00002.bin", "3.weight": "pytorch_model-00002-of-00002.bin", }, }, ) shard1 = {"0.weight": state_dict["0.weight"], "1.weight": state_dict["1.weight"]} shard2 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]} self.assertDictEqual( shards, {"pytorch_model-00001-of-00002.bin": shard1, "pytorch_model-00002-of-00002.bin": shard2} ) with self.subTest("Test sharding with weights bigger than max size"): shards, index = shard_checkpoint(state_dict, max_shard_size="100kB") # Split is first layer, second layer then last 2. 
self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "0.weight": "pytorch_model-00001-of-00003.bin", "1.weight": "pytorch_model-00002-of-00003.bin", "2.weight": "pytorch_model-00003-of-00003.bin", "3.weight": "pytorch_model-00003-of-00003.bin", }, }, ) shard1 = {"0.weight": state_dict["0.weight"]} shard2 = {"1.weight": state_dict["1.weight"]} shard3 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]} self.assertDictEqual( shards, { "pytorch_model-00001-of-00003.bin": shard1, "pytorch_model-00002-of-00003.bin": shard2, "pytorch_model-00003-of-00003.bin": shard3, }, ) def test_checkpoint_sharding_local_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. for max_size in ["50kB", "50kiB", "100kB", "100kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size, safe_serialization=False) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".bin"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: state_dict = torch.load(shard_file) self.assertEqual(len(state_dict), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".bin")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_sharding_from_hub(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. 
ref_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.parameters(), ref_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_local_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False) weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) weights_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_local_sharded_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=False) weights_index_name = ".".join(WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) for i in range(1, 5): weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["bin"]) weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_checkpoint_variant_local_safe(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", safe_serialization=True) weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["safetensors"]) weights_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_checkpoint_variant_local_sharded_safe(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=True) weights_index_name = ".".join(SAFE_WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) for i in range(1, 5): weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["safetensors"]) weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) with self.assertRaises(EnvironmentError): _ = 
BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) def test_checkpoint_variant_hub_sharded(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir ) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) @require_safetensors def test_checkpoint_variant_hub_safe(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) @require_safetensors def test_checkpoint_variant_hub_sharded_safe(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir ) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) def test_checkpoint_variant_save_load_bin(self): with tempfile.TemporaryDirectory() as tmp_dir: model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" ) weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False) # saving will create a variant checkpoint self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) model.save_pretrained(tmp_dir, safe_serialization=False) # saving shouldn't delete variant checkpoints weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) # there should be a normal checkpoint self.assertTrue(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) self.assertIsNotNone(model) @require_accelerate @mark.accelerate_tests def test_from_pretrained_low_cpu_mem_usage_functional(self): # test that we can use `from_pretrained(..., low_cpu_mem_usage=True)` with normal and # sharded models mnames = [ "hf-internal-testing/tiny-random-bert-sharded", "hf-internal-testing/tiny-random-bert", ] for mname in mnames: _ = BertModel.from_pretrained(mname, low_cpu_mem_usage=True) @require_usr_bin_time @require_accelerate @mark.accelerate_tests def test_from_pretrained_low_cpu_mem_usage_measured(self): # test that `from_pretrained(..., low_cpu_mem_usage=True)` uses less cpu memory than default mname = "bert-base-cased" preamble = "from transformers import AutoModel" one_liner_str = f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=False)' max_rss_normal = self.python_one_liner_max_rss(one_liner_str) # print(f"{max_rss_normal=}") one_liner_str = f'{preamble}; 
AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=True)' max_rss_low_mem = self.python_one_liner_max_rss(one_liner_str) # print(f"{max_rss_low_mem=}") diff_bytes = max_rss_normal - max_rss_low_mem diff_percent = diff_bytes / max_rss_low_mem # print(f"{diff_bytes=}, {diff_percent=}") # ideally we would compare that the diff is close to ~1x checkpoint size in bytes, but # measuring cpu memory on linux is very tricky and inconsistent, so instead let's check that # it's at least 15% less cpu memory consumed self.assertGreater( diff_percent, 0.15, "should use less CPU memory for low_cpu_mem_usage=True, " f"but got max_rss_normal={max_rss_normal} and max_rss_low_mem={max_rss_low_mem}", ) # if you want to compare things manually, let's first look at the size of the model in bytes # model = BertModel.from_pretrained(mname, low_cpu_mem_usage=False) # total_numel = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) # total_bytes = total_numel * 4 # 420MB # Now the diff_bytes should be very close to total_bytes, but the reports are inconsistent. # The easiest way to test this is to switch the model and torch.load to do all the work on # gpu - that way one can measure exactly the total and peak memory used. Perhaps once we add # functionality to load models directly on gpu, this test can be rewritten to use torch's # cuda memory tracking and then we should be able to do a much more precise test. @require_accelerate @mark.accelerate_tests @require_torch_multi_accelerator @slow def test_model_parallelism_gpt2(self): device_map = {"transformer.wte": 0, "transformer.wpe": 0, "lm_head": 0, "transformer.ln_f": 1} for i in range(12): device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1 model = AutoModelForCausalLM.from_pretrained("gpt2", device_map=device_map) tokenizer = AutoTokenizer.from_pretrained("gpt2") inputs = tokenizer("Hello, my name is", return_tensors="pt") output = model.generate(inputs["input_ids"].to(0)) text_output = tokenizer.decode(output[0].tolist()) self.assertEqual(text_output, "Hello, my name is John. I'm a writer, and I'm a writer. 
I'm") @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_from_pretrained_disk_offload_task_model(self): model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2") device_map = { "transformer.wte": 0, "transformer.wpe": 0, "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": "disk", "transformer.h.4": "disk", "transformer.ln_f": 0, "lm_head": 0, } with tempfile.TemporaryDirectory() as tmp_dir: inputs = torch.tensor([[1, 2, 3]]).to(0) model.save_pretrained(tmp_dir) new_model = AutoModelForCausalLM.from_pretrained(tmp_dir).to(0) outputs1 = new_model.to(0)(inputs) offload_folder = os.path.join(tmp_dir, "offload") new_model_with_offload = AutoModelForCausalLM.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder ) outputs2 = new_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu())) # With state dict temp offload offload_folder = os.path.join(tmp_dir, "offload") new_model_with_offload = AutoModelForCausalLM.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder, offload_state_dict=True, ) outputs2 = new_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu())) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_from_pretrained_disk_offload_derived_to_base_model(self): derived_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") device_map = { "wte": 0, "wpe": 0, "h.0": "cpu", "h.1": "cpu", "h.2": "cpu", "h.3": "disk", "h.4": "disk", "ln_f": 0, } with tempfile.TemporaryDirectory() as tmp_dir: inputs = torch.tensor([[1, 2, 3]]).to(0) derived_model.save_pretrained(tmp_dir, use_safetensors=True) base_model = AutoModel.from_pretrained(tmp_dir) outputs1 = base_model.to(0)(inputs) # with disk offload offload_folder = os.path.join(tmp_dir, "offload") base_model_with_offload = AutoModel.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder ) outputs2 = base_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu())) # With state dict temp offload new_model_with_offload = AutoModel.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder, offload_state_dict=True, ) outputs2 = new_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu())) @slow @require_torch def test_from_pretrained_non_contiguous_checkpoint(self): # See: https://github.com/huggingface/transformers/pull/28414 # Tiny models on the Hub have contiguous weights, contrarily to google/owlvit model = OwlViTForObjectDetection.from_pretrained("fxmarty/owlvit-tiny-non-contiguous-weight") self.assertTrue(model.owlvit.visual_projection.weight.is_contiguous()) model = OwlViTForObjectDetection.from_pretrained( "fxmarty/owlvit-tiny-non-contiguous-weight", device_map="auto" ) self.assertTrue(model.owlvit.visual_projection.weight.is_contiguous()) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=False) model.save_pretrained(tmp_dir, safe_serialization=True) def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the 
cache. _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() def test_load_from_one_file(self): try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", f ) config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = BertModel.from_pretrained(tmp_file, config=config) finally: os.remove(tmp_file) def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = BertModel.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", config=config ) @require_safetensors def test_use_safetensors(self): # Should not raise anymore AutoModel.from_pretrained("hf-internal-testing/tiny-random-RobertaModel", use_safetensors=True) # test that error if only safetensors is available with self.assertRaises(OSError) as env_error: BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors", use_safetensors=False) self.assertTrue("does not appear to have a file named pytorch_model.bin" in str(env_error.exception)) # test that only safetensors if both available and use_safetensors=False with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=False, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("bin") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("safetensors") for f in all_downloaded_files)) # test that no safetensors if both available and use_safetensors=True with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=True, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("safetensors") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("bin") for f in all_downloaded_files)) @require_safetensors def test_safetensors_save_and_load(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) # No pytorch_model.bin file, only a model.safetensors self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_load_from_hub(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Check models are equal for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): 
self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_save_and_load_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") # No pytorch_model.bin index file, only a model.safetensors index self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) # No regular weights file self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_load_from_hub_sharded(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") # Check models are equal for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_base_model_to_head_model_load(self): base_model = BaseModel(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: base_model.save_pretrained(tmp_dir, safe_serialization=False) # Can load a base model in a model with head model = ModelWithHead.from_pretrained(tmp_dir) for p1, p2 in zip(model.base.parameters(), base_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) # It doesn't work if the state dict has a mix of keys of the head and base without prefix though. base_state_dict = base_model.state_dict() head_state_dict = model.state_dict() base_state_dict["linear2.weight"] = head_state_dict["linear2.weight"] base_state_dict["linear2.bias"] = head_state_dict["linear2.bias"] safe_save_file(base_state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with self.assertRaisesRegex( ValueError, "The state dictionary of the model you are trying to load is corrupted." 
): _ = ModelWithHead.from_pretrained(tmp_dir) def test_tied_weights_reload(self): # Base model = BaseModelWithTiedWeights(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = BaseModelWithTiedWeights.from_pretrained(tmp_dir) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) state_dict = model.state_dict() # Remove tied weight from state_dict -> model should load with no complain of missing keys del state_dict["linear_2.weight"] torch.save(state_dict, os.path.join(tmp_dir, WEIGHTS_NAME)) new_model, load_info = BaseModelWithTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertListEqual(load_info["missing_keys"], []) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) # With head model.save_pretrained(tmp_dir) new_model, load_info = ModelWithHeadAndTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertIs(new_model.base.linear.weight, new_model.decoder.weight) # Should only complain about the missing bias self.assertListEqual(load_info["missing_keys"], ["decoder.bias"]) def test_unexpected_keys_warnings(self): model = ModelWithHead(PretrainedConfig()) logger = logging.get_logger("transformers.modeling_utils") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # Loading the model with a new class, we don't get a warning for unexpected weights, just an info with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: _, loading_info = BaseModel.from_pretrained(tmp_dir, output_loading_info=True) self.assertNotIn("were not used when initializing ModelWithHead", cl.out) self.assertEqual( set(loading_info["unexpected_keys"]), {"linear.weight", "linear.bias", "linear2.weight", "linear2.bias"}, ) # Loading the model with the same class, we do get a warning for unexpected weights state_dict = model.state_dict() state_dict["added_key"] = copy.deepcopy(state_dict["linear.weight"]) safe_save_file(state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: _, loading_info = ModelWithHead.from_pretrained(tmp_dir, output_loading_info=True) self.assertIn("were not used when initializing ModelWithHead: ['added_key']", cl.out) self.assertEqual(loading_info["unexpected_keys"], ["added_key"]) def test_warn_if_padding_and_no_attention_mask(self): logger = logging.get_logger("transformers.modeling_utils") with self.subTest("Ensure no warnings when pad_token_id is None."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config_no_pad_token = PretrainedConfig() config_no_pad_token.pad_token_id = None model = ModelWithHead(config_no_pad_token) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure no warnings when there is an attention_mask."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) self.assertNotIn("We strongly recommend passing in an 
`attention_mask`", cl.out) with self.subTest("Ensure no warnings when there are no pad_token_ids in the input_ids."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[1, 345, 232, 328, 740, 140, 1695, 69, 6078, 2341, 25]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids start with a pad_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids end with a pad_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[432, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure that the warning is shown at most once."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertEqual(cl.out.count("We strongly recommend passing in an `attention_mask`"), 1) with self.subTest("Ensure a different warning is shown when the pad_token_id is equal to the bos_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 config.bos_token_id = config.pad_token_id model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("You may ignore this warning if your `pad_token_id`", cl.out) if not is_torchdynamo_available(): return with self.subTest("Ensure that the warning code is skipped when compiling with torchdynamo."): logger.warning_once.cache_clear() from torch._dynamo import config, testing config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]]) def f(input_ids): model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) compile_counter = testing.CompileCounter() opt_fn = torch.compile(f, dynamic=True, backend=compile_counter) opt_fn(input_ids) self.assertEqual(compile_counter.frame_count, 0) @require_torch_accelerator @slow def test_pretrained_low_mem_new_config(self): # Checking for 1 model(the same one which was described in the issue) . 
model_ids = ["gpt2"] for model_id in model_ids: model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path=model_id) model_config.n_layer = 48 model_config.n_head = 25 model_config.n_embd = 1600 model = AutoModelForCausalLM.from_pretrained( pretrained_model_name_or_path=model_id, config=model_config, ignore_mismatched_sizes=True, torch_dtype=torch.float16, low_cpu_mem_usage=True, ) model_ref = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_id) self.assertEqual(model.__class__.__name__, model_ref.__class__.__name__) def test_generation_config_is_loaded_with_model(self): # Note: `joaogante/tiny-random-gpt2-with-generation-config` has a `generation_config.json` containing a dummy # `transformers_version` field set to `foo`. If loading the file fails, this test also fails. # 1. Load without further parameters model = AutoModelForCausalLM.from_pretrained("joaogante/tiny-random-gpt2-with-generation-config") self.assertEqual(model.generation_config.transformers_version, "foo") # 2. Load with `device_map` model = AutoModelForCausalLM.from_pretrained( "joaogante/tiny-random-gpt2-with-generation-config", device_map="auto" ) self.assertEqual(model.generation_config.transformers_version, "foo") @require_safetensors def test_safetensors_torch_from_torch(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_safetensors @require_flax def test_safetensors_torch_from_flax(self): hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(hub_model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_tf @require_safetensors def test_safetensors_torch_from_tf(self): hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(hub_model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_safetensors def test_safetensors_torch_from_torch_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_modifying_model_config_causes_warning_saving_generation_config(self): model = AutoModelForCausalLM.from_pretrained("gpt2") model.config.top_k = 1 with tempfile.TemporaryDirectory() as tmp_dir: with self.assertLogs("transformers.modeling_utils", level="WARNING") as logs: model.save_pretrained(tmp_dir) self.assertEqual(len(logs.output), 1) self.assertIn("Your generation config was originally created from the model config", logs.output[0]) @slow @require_torch class 
ModelOnTheFlyConversionTester(unittest.TestCase): @classmethod def setUpClass(cls): cls.user = "huggingface-hub-ci" cls.token = os.getenv("HUGGINGFACE_PRODUCTION_USER_TOKEN", None) if cls.token is None: raise ValueError("Cannot run tests as secret isn't setup.") cls.api = HfApi(token=cls.token) def setUp(self) -> None: self.repo_name = f"{self.user}/test-model-on-the-fly-{uuid.uuid4()}" def tearDown(self) -> None: self.api.delete_repo(self.repo_name) def test_safetensors_on_the_fly_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_conversion_private(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name, token=self.token) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_conversion_gated(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) headers = {"Authorization": f"Bearer {self.token}"} requests.put( f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb") converted_model = 
BertModel.from_pretrained(self.repo_name, use_safetensors=True) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion_private(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub( self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb", private=True ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion_gated(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, max_shard_size="200kb", safe_serialization=False) headers = {"Authorization": f"Bearer {self.token}"} requests.put( f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") @unittest.skip("Edge case, should work once the Space is updated`") def test_safetensors_on_the_fly_wrong_user_opened_pr(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True) BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) # This should have opened a PR with the user's account with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") # We now switch the repo visibility to public self.api.update_repo_visibility(self.repo_name, private=False) # We once again call from_pretrained, which should call the bot to open a PR BertModel.from_pretrained(self.repo_name, use_safetensors=True, 
token=self.token) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) bot_opened_pr = None bot_opened_pr_title = None for discussion in discussions: if discussion.author == "SFconvertBot": bot_opened_pr = True bot_opened_pr_title = discussion.title self.assertTrue(bot_opened_pr) self.assertEqual(bot_opened_pr_title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_specific_revision(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) # Push a model on `main` initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) # Push a model on a given revision initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, revision="new-branch") # Try to convert the model on that revision should raise with self.assertRaises(EnvironmentError): BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token, revision="new-branch") @require_torch @is_staging_test class ModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-model") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-model") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-model-with-tags") except HTTPError: pass @unittest.skip("This test is flaky") def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) model.push_to_hub("test-model", token=self._token) new_model = BertModel.from_pretrained(f"{USER}/test-model") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=self._token, repo_id="test-model") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model", push_to_hub=True, token=self._token) new_model = BertModel.from_pretrained(f"{USER}/test-model") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_push_to_hub_with_description(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) COMMIT_DESCRIPTION = """ The commit description supports markdown synthax see: ```python >>> form transformers import AutoConfig >>> config = AutoConfig.from_pretrained("bert-base-uncased") ``` """ commit_details = model.push_to_hub( "test-model", use_auth_token=self._token, create_pr=True, commit_description=COMMIT_DESCRIPTION ) self.assertEqual(commit_details.commit_description, COMMIT_DESCRIPTION) @unittest.skip("This test is flaky") def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) model.push_to_hub("valid_org/test-model-org", token=self._token) new_model = BertModel.from_pretrained("valid_org/test-model-org") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo 
delete_repo(token=self._token, repo_id="valid_org/test-model-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, push_to_hub=True, token=self._token, repo_id="valid_org/test-model-org") new_model = BertModel.from_pretrained("valid_org/test-model-org") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_push_to_hub_dynamic_model(self): CustomConfig.register_for_auto_class() CustomModel.register_for_auto_class() config = CustomConfig(hidden_size=32) model = CustomModel(config) model.push_to_hub("test-dynamic-model", token=self._token) # checks self.assertDictEqual( config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig", "AutoModel": "custom_modeling.CustomModel"}, ) new_model = AutoModel.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True) # Can't make an isinstance check because the new_model is from the CustomModel class of a dynamic module self.assertEqual(new_model.__class__.__name__, "CustomModel") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True) new_model = AutoModel.from_config(config, trust_remote_code=True) self.assertEqual(new_model.__class__.__name__, "CustomModel") def test_push_to_hub_with_tags(self): from huggingface_hub import ModelCard new_tags = ["tag-1", "tag-2"] CustomConfig.register_for_auto_class() CustomModel.register_for_auto_class() config = CustomConfig(hidden_size=32) model = CustomModel(config) self.assertTrue(model.model_tags is None) model.add_model_tags(new_tags) self.assertTrue(model.model_tags == new_tags) model.push_to_hub("test-dynamic-model-with-tags", token=self._token) loaded_model_card = ModelCard.load(f"{USER}/test-dynamic-model-with-tags") self.assertEqual(loaded_model_card.data.tags, new_tags) @require_torch class AttentionMaskTester(unittest.TestCase): def check_non_causal(self, bsz, q_len, kv_len, mask_2d, mask_4d): mask_indices = (mask_2d != 1)[:, None].broadcast_to((bsz, q_len, kv_len)) mask_4d_values = mask_4d[:, 0][mask_indices] is_inf = mask_4d_values == -float("inf") is_min = mask_4d_values == torch.finfo(mask_4d.dtype).min assert torch.logical_or(is_inf, is_min).all() def check_to_4d(self, mask_converter, q_len, kv_len, additional_mask=None, bsz=3): mask_2d = torch.ones((bsz, kv_len), device=torch_device, dtype=torch.long) if additional_mask is not None: for bsz_idx, seq_idx in additional_mask: mask_2d[bsz_idx, seq_idx] = 0 mask_4d = mask_converter.to_4d(mask_2d, query_length=q_len, key_value_length=kv_len, dtype=torch.float32) assert mask_4d.shape == (bsz, 1, q_len, kv_len) # make sure there are no overflows assert mask_4d.min() != float("-inf") context = mask_converter.sliding_window if mask_converter.is_causal and context is None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked if 0 in mask_2d: # at least causal mask + maybe more assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) elif not mask_converter.is_causal and context is None: if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == 0 if 0 in mask_2d: self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) elif mask_converter.is_causal and context is not None: # k * (k+1) 
/ 2 tokens are masked in triangualar masks num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) num_tokens_masked = bsz * num_tokens_masked if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked if 0 in mask_2d: # at least causal mask + maybe more assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) def check_to_causal(self, mask_converter, q_len, kv_len, bsz=3): mask_4d = mask_converter.to_causal_4d( bsz, query_length=q_len, key_value_length=kv_len, device=torch_device, dtype=torch.float32 ) if q_len == 1 and mask_converter.sliding_window is None: # no causal mask if q_len is 1 assert mask_4d is None return context = mask_converter.sliding_window if mask_converter.is_causal and context is None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked elif not mask_converter.is_causal and context is None: assert (mask_4d != 0).sum().cpu().item() == 0 elif mask_converter.is_causal and context is not None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) num_tokens_masked = bsz * num_tokens_masked assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked def compute_num_context_mask(self, kv_len, context, q_len): # This function computes the # of attention tokens that are added for # the sliding window c_mask_len = kv_len - context num_mask_triangle = c_mask_len * (c_mask_len + 1) // 2 cut_mask_len = max(c_mask_len - q_len, 0) num_cut_mask = cut_mask_len * (cut_mask_len + 1) // 2 return num_mask_triangle - num_cut_mask def test_2d_to_4d_causal(self): mask_converter = AttentionMaskConverter(is_causal=True) # auto-regressive use case self.check_to_4d(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_4d(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) # check that the mask does not overflow on causal masked tokens self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 0), (1, 0), (1, 1)]) def test_2d_to_4d(self): mask_converter = AttentionMaskConverter(is_causal=False) # non auto-regressive case self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_2d_to_4d_causal_sliding(self): mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=5) # auto-regressive use case self.check_to_4d(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_4d(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=7, kv_len=7, 
additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_causal_mask(self): mask_converter = AttentionMaskConverter(is_causal=True) # auto-regressive use case self.check_to_causal(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_causal(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_causal(mask_converter, q_len=7, kv_len=7) def test_causal_mask_sliding(self): mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=3) # auto-regressive use case self.check_to_causal(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_causal(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_causal(mask_converter, q_len=7, kv_len=7) def test_torch_compile_fullgraph(self): model = Prepare4dCausalAttentionMaskModel() inputs_embeds = torch.rand([1, 3, 32]) res_non_compiled = model(inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) model = Create4dCausalAttentionMaskModel() inputs_embeds = torch.rand(2, 4, 16) res_non_compiled = model(inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) model = Prepare4dAttentionMaskModel() mask = torch.ones(2, 4) mask[0, :2] = 0 inputs_embeds = torch.rand(2, 4, 16) res_non_compiled = model(mask, inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(mask, inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) @require_torch @slow def test_unmask_unattended_left_padding(self): attention_mask = torch.Tensor([[0, 0, 1], [1, 1, 1], [0, 1, 1]]).to(torch.int64) expanded_mask = torch.Tensor( [ [[[0, 0, 0], [0, 0, 0], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[0, 0, 0], [0, 1, 0], [0, 1, 1]]], ] ).to(torch.int64) reference_output = torch.Tensor( [ [[[1, 1, 1], [1, 1, 1], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[1, 1, 1], [0, 1, 0], [0, 1, 1]]], ] ).to(torch.int64) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=1) self.assertTrue(torch.equal(result, reference_output)) attention_mask = torch.Tensor([[0, 0, 1, 1, 1], [1, 1, 1, 1, 1], [0, 1, 1, 1, 1]]).to(torch.int64) attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0) min_inf = torch.finfo(torch.float32).min reference_output = torch.Tensor( [ [ [ [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [min_inf, min_inf, 0, min_inf, min_inf], [min_inf, min_inf, 0, 0, min_inf], [min_inf, min_inf, 0, 0, 0], ] ], [ [ [0, min_inf, min_inf, min_inf, min_inf], [0, 0, min_inf, min_inf, min_inf], [0, 0, 0, min_inf, min_inf], [0, 0, 0, 0, min_inf], [0, 0, 0, 0, 0], ] ], [ [ [0, 0, 0, 0, 0], [min_inf, 0, min_inf, min_inf, min_inf], [min_inf, 0, 0, min_inf, min_inf], [min_inf, 0, 0, 0, min_inf], [min_inf, 0, 0, 0, 0], ] ], ] ) self.assertTrue(torch.equal(reference_output, result)) @require_torch @slow def test_unmask_unattended_right_padding(self): attention_mask = torch.Tensor([[1, 1, 1, 0], [1, 1, 1, 1], [1, 1, 0, 0]]).to(torch.int64) 
attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0) self.assertTrue(torch.equal(expanded_mask, result)) @require_torch @slow def test_unmask_unattended_random_mask(self): attention_mask = torch.Tensor([[1, 0, 1, 0], [1, 0, 1, 1], [1, 1, 0, 1]]).to(torch.int64) attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0) self.assertTrue(torch.equal(expanded_mask, result)) @require_torch class TestAttentionImplementation(unittest.TestCase): def test_error_no_sdpa_available(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="sdpa") self.assertTrue( "does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention" in str(cm.exception) ) _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel") def test_error_no_flash_available(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained( "hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="flash_attention_2" ) self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception)) def test_error_no_flash_available_with_config(self): with self.assertRaises(ValueError) as cm: config = AutoConfig.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel") _ = AutoModel.from_pretrained( "hf-tiny-model-private/tiny-random-MCTCTModel", config=config, attn_implementation="flash_attention_2" ) self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception)) def test_error_wrong_attn_implementation(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="foo") self.assertTrue('The only possible arguments are `attn_implementation="eager"' in str(cm.exception)) def test_not_available_flash(self): if is_flash_attn_2_available(): self.skipTest("Please uninstall flash-attn package to run test_not_available_flash") with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( "hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="flash_attention_2" ) self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception)) def test_not_available_flash_with_config(self): if is_flash_attn_2_available(): self.skipTest("Please uninstall flash-attn package to run test_not_available_flash") config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-GPTBigCodeModel") with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( "hf-internal-testing/tiny-random-GPTBigCodeModel", config=config, attn_implementation="flash_attention_2", ) self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception)) def test_not_available_sdpa(self): if is_torch_sdpa_available(): self.skipTest("This test requires torch<=2.0") 
with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( "hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="sdpa" ) self.assertTrue("PyTorch SDPA requirements in Transformers are not met" in str(cm.exception)) @slow @require_torch_gpu class Mask4DTestBase(unittest.TestCase): def tearDown(self): gc.collect() torch.cuda.empty_cache() def get_test_data(self): texts = ["the cat sat", "the cat had", "the cat is"] encoded = [self.tokenizer.encode(t) for t in texts] input_0 = torch.tensor(encoded, device=torch_device) # tensor([[ 1, 278, 6635, 3290], # [ 1, 278, 6635, 750], # [ 1, 278, 6635, 338]], device='cuda:0') # Combining common prefix with the unique ending tokens: input_1 = torch.cat([input_0[0][:-1], input_0[:, -1]]).unsqueeze(0) # tensor([[ 1, 278, 6635, 3290, 750, 338]], device='cuda:0') # Creating a 4D mask where each of the last 3 tokens do not attend to each other. mask_1 = torch.tensor( [ [ [ [1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 0, 1], ] ] ], device="cuda:0", dtype=torch.int64, ) # Creating a position_ids tensor. note the repeating figures in the end. position_ids_1 = torch.tensor([[0, 1, 2, 3, 3, 3]], device=torch_device, dtype=torch.int64) return input_0, input_1, mask_1, position_ids_1 @slow @require_torch_gpu class Mask4DTestFP32(Mask4DTestBase): def setUp(self): model_name = "JackFram/llama-68m" # small Llama-like model from FlexFlow model_dtype = torch.float32 self.tokenizer = AutoTokenizer.from_pretrained(model_name) self.model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=model_dtype).to(torch_device) def test_attention(self): """comparing outputs of attention layer""" input_0, input_1, mask_1, position_ids_1 = self.get_test_data() hid_0 = self.model.model.embed_tokens(input_0) outs_0 = self.model.model.layers[0].self_attn.forward(hid_0)[0] # outs_0.shape == torch.Size([3, 4, 768]) hid_1 = self.model.model.embed_tokens(input_1) outs_1 = self.model.model.layers[0].self_attn.forward( hid_1, attention_mask=mask_1.bool(), position_ids=position_ids_1 )[0] # outs_1.shape == torch.Size([1, 6, 768]) outs_0_last_tokens = outs_0[:, -1, :] # last tokens in each batch line outs_1_last_tokens = outs_1[0, -3:, :] # last three tokens assert torch.allclose(outs_0_last_tokens, outs_1_last_tokens) def test_inner_model(self): """comparing hidden outputs of whole inner model""" input_0, input_1, mask_1, position_ids_1 = self.get_test_data() logits_0 = self.model.forward(input_0).logits logits_1 = self.model.forward(input_1, attention_mask=mask_1.bool(), position_ids=position_ids_1).logits logits_0_last_tokens = logits_0[:, -1, :] # last tokens in each batch line logits_1_last_tokens = logits_1[0, -3:, :] # last three tokens torch.testing.assert_close( logits_0_last_tokens, logits_1_last_tokens, ) def test_causal_model_logits(self): """comparing logits outputs of whole inner model""" input_0, input_1, mask_1, position_ids_1 = self.get_test_data() logits_0 = self.model.forward(input_0).logits logits_1 = self.model.forward(input_1, attention_mask=mask_1.bool(), position_ids=position_ids_1).logits logits_0_last_tokens = logits_0[:, -1, :] # last tokens in each batch line logits_1_last_tokens = logits_1[0, -3:, :] # last three tokens torch.testing.assert_close( logits_0_last_tokens, logits_1_last_tokens, ) @slow @require_torch_gpu class Mask4DTestFP16(Mask4DTestBase): test_attention = Mask4DTestFP32.test_attention def setUp(self): model_name = "JackFram/llama-68m" # 
small Llama-like model from FlexFlow model_dtype = torch.float16 self.tokenizer = AutoTokenizer.from_pretrained(model_name) self.model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=model_dtype).to(torch_device) def test_causal_model_logits(self): """comparing logits outputs of whole inner model""" input_0, input_1, mask_1, position_ids_1 = self.get_test_data() logits_0 = self.model.forward(input_0).logits logits_1 = self.model.forward(input_1, attention_mask=mask_1.bool(), position_ids=position_ids_1).logits logits_0_last_tokens = logits_0[:, -1, :] # last tokens in each batch line logits_1_last_tokens = logits_1[0, -3:, :] # last three tokens indices_0 = logits_0_last_tokens.sort(descending=True).indices indices_1 = logits_1_last_tokens.sort(descending=True).indices # checking logits, but note relaxed tolerances for FP16 torch.testing.assert_close(logits_0_last_tokens, logits_1_last_tokens, atol=0.02, rtol=0.001) # checking tokens order for the top tokens for token_ids_0, token_ids_1 in zip(indices_0, indices_1): self.assertTrue(torch.equal(token_ids_0[:128], token_ids_1[:128]))
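# --- Illustrative sketch (editor's addition, not part of the test file above) ---
# The Mask4D tests above pack three sequences that share a common prefix into a
# single row and rely on a custom 4D attention mask plus repeated position_ids.
# The helper below is an assumed, minimal way to build such a mask; with
# prefix_len=3 and num_branches=3 it reproduces the `mask_1` pattern used in
# `get_test_data` (1 = attend, 0 = masked).
import torch


def build_shared_prefix_mask(prefix_len: int, num_branches: int) -> torch.Tensor:
    total_len = prefix_len + num_branches
    # Start from a standard causal (lower-triangular) mask.
    mask = torch.tril(torch.ones(total_len, total_len, dtype=torch.int64))
    # Each branch token attends to the shared prefix and to itself only,
    # never to the other branch tokens.
    for i in range(num_branches):
        row = prefix_len + i
        mask[row, prefix_len:] = 0
        mask[row, row] = 1
    # Shape (1, 1, total_len, total_len), ready to be passed as `attention_mask`.
    return mask[None, None]


# build_shared_prefix_mask(3, 3) matches `mask_1`; the corresponding position_ids
# would be torch.tensor([[0, 1, 2, 3, 3, 3]]), as in the tests above.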
transformers/tests/test_modeling_utils.py/0
{ "file_path": "transformers/tests/test_modeling_utils.py", "repo_id": "transformers", "token_count": 42668 }
418
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
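# --- Illustrative sketch (editor's addition, not part of the test file above) ---
# `test_gelu_10` above checks that the "gelu_10" activation behaves like a GELU
# whose output is clipped at 10. A minimal stand-alone approximation of that
# behaviour (assumed for illustration, not the library implementation verbatim):
import torch
import torch.nn.functional as F


def clipped_gelu(x: torch.Tensor, max_value: float = 10.0) -> torch.Tensor:
    # Values below the clip threshold are unchanged, which is exactly what the
    # `clipped_mask` comparison in `test_gelu_10` verifies.
    return torch.clamp(F.gelu(x), max=max_value)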
transformers/tests/utils/test_activations.py/0
{ "file_path": "transformers/tests/utils/test_activations.py", "repo_id": "transformers", "token_count": 1061 }
419
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import tempfile
import unittest

from transformers.modelcard import ModelCard


class ModelCardTester(unittest.TestCase):
    def setUp(self):
        self.inputs_dict = {
            "model_details": {
                "Organization": "testing",
                "Model date": "today",
                "Model version": "v2.1, Developed by Test Corp in 2019.",
                "Architecture": "Convolutional Neural Network.",
            },
            "metrics": "BLEU and ROUGE-1",
            "evaluation_data": {
                "Datasets": {"BLEU": "My-great-dataset-v1", "ROUGE-1": "My-short-dataset-v2.1"},
                "Preprocessing": "See details on https://arxiv.org/pdf/1810.03993.pdf",
            },
            "training_data": {
                "Dataset": "English Wikipedia dump dated 2018-12-01",
                "Preprocessing": (
                    "Using SentencePiece vocabulary of size 52k tokens. See details on"
                    " https://arxiv.org/pdf/1810.03993.pdf"
                ),
            },
            "quantitative_analyses": {"BLEU": 55.1, "ROUGE-1": 76},
        }

    def test_model_card_common_properties(self):
        modelcard = ModelCard.from_dict(self.inputs_dict)
        self.assertTrue(hasattr(modelcard, "model_details"))
        self.assertTrue(hasattr(modelcard, "intended_use"))
        self.assertTrue(hasattr(modelcard, "factors"))
        self.assertTrue(hasattr(modelcard, "metrics"))
        self.assertTrue(hasattr(modelcard, "evaluation_data"))
        self.assertTrue(hasattr(modelcard, "training_data"))
        self.assertTrue(hasattr(modelcard, "quantitative_analyses"))
        self.assertTrue(hasattr(modelcard, "ethical_considerations"))
        self.assertTrue(hasattr(modelcard, "caveats_and_recommendations"))

    def test_model_card_to_json_string(self):
        modelcard = ModelCard.from_dict(self.inputs_dict)
        obj = json.loads(modelcard.to_json_string())
        for key, value in self.inputs_dict.items():
            self.assertEqual(obj[key], value)

    def test_model_card_to_json_file(self):
        model_card_first = ModelCard.from_dict(self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            filename = os.path.join(tmpdirname, "modelcard.json")
            model_card_first.to_json_file(filename)
            model_card_second = ModelCard.from_json_file(filename)

        self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict())

    def test_model_card_from_and_save_pretrained(self):
        model_card_first = ModelCard.from_dict(self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            model_card_first.save_pretrained(tmpdirname)
            model_card_second = ModelCard.from_pretrained(tmpdirname)

        self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict())
transformers/tests/utils/test_model_card.py/0
{ "file_path": "transformers/tests/utils/test_model_card.py", "repo_id": "transformers", "token_count": 1475 }
420
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that checks the custom inits of Transformers are well-defined: Transformers uses init files that delay the import of an object to when it's actually needed. This is to avoid the main init importing all models, which would make the line `import transformers` very slow when the user has all optional dependencies installed. The inits with delayed imports have two halves: one definining a dictionary `_import_structure` which maps modules to the name of the objects in each module, and one in `TYPE_CHECKING` which looks like a normal init for type-checkers. The goal of this script is to check the objects defined in both halves are the same. This also checks the main init properly references all submodules, even if it doesn't import anything from them: every submodule should be defined as a key of `_import_structure`, with an empty list as value potentially, or the submodule won't be importable. Use from the root of the repo with: ```bash python utils/check_inits.py ``` for a check that will error in case of inconsistencies (used by `make repo-consistency`). There is no auto-fix possible here sadly :-( """ import collections import os import re from pathlib import Path from typing import Dict, List, Optional, Tuple # Path is set with the intent you should run this script from the root of the repo. PATH_TO_TRANSFORMERS = "src/transformers" # Matches is_xxx_available() _re_backend = re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} _re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available _re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") _re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", _re_quote_object = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], _re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo _re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: _re_try = re.compile(r"^\s*try:") # Catches a line with else: _re_else = re.compile(r"^\s*else:") def find_backend(line: str) -> Optional[str]: """ Find one (or multiple) backend in a code line of the init. Args: line (`str`): A code line of the main init. Returns: Optional[`str`]: If one (or several) backend is found, returns it. 
In the case of multiple backends (the line contains `if is_xxx_available() and `is_yyy_available()`) returns all backends joined on `_and_` (so `xxx_and_yyy` for instance). """ if _re_test_backend.search(line) is None: return None backends = [b[0] for b in _re_backend.findall(line)] backends.sort() return "_and_".join(backends) def parse_init(init_file) -> Optional[Tuple[Dict[str, List[str]], Dict[str, List[str]]]]: """ Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects defined. Args: init_file (`str`): Path to the init file to inspect. Returns: `Optional[Tuple[Dict[str, List[str]], Dict[str, List[str]]]]`: A tuple of two dictionaries mapping backends to list of imported objects, one for the `_import_structure` part of the init and one for the `TYPE_CHECKING` part of the init. Returns `None` if the init is not a custom init. """ with open(init_file, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Get the to `_import_structure` definition. line_index = 0 while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lines): return None # First grab the objects without a specific backend in _import_structure objects = [] while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None: line = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(line): content = _re_one_line_import_struct.search(line).groups()[0] imports = re.findall(r"\[([^\]]+)\]", content) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(", ")]) line_index += 1 continue single_line_import_search = _re_import_struct_key_value.search(line) if single_line_import_search is not None: imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0] objects.extend(imports) elif line.startswith(" " * 8 + '"'): objects.append(line[9:-3]) line_index += 1 # Those are stored with the key "none". import_dict_objects = {"none": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("if TYPE_CHECKING"): # If the line is an if not is_backend_available, we grab all objects associated. 
backend = find_backend(lines[line_index]) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1]) is None: backend = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index]) is None: line_index += 1 line_index += 1 objects = [] # Until we unindent, add backend objects to the list while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4): line = lines[line_index] if _re_import_struct_add_one.search(line) is not None: objects.append(_re_import_struct_add_one.search(line).groups()[0]) elif _re_import_struct_add_many.search(line) is not None: imports = _re_import_struct_add_many.search(line).groups()[0].split(", ") imports = [obj[1:-1] for obj in imports if len(obj) > 0] objects.extend(imports) elif _re_between_brackets.search(line) is not None: imports = _re_between_brackets.search(line).groups()[0].split(", ") imports = [obj[1:-1] for obj in imports if len(obj) > 0] objects.extend(imports) elif _re_quote_object.search(line) is not None: objects.append(_re_quote_object.search(line).groups()[0]) elif line.startswith(" " * 8 + '"'): objects.append(line[9:-3]) elif line.startswith(" " * 12 + '"'): objects.append(line[13:-3]) line_index += 1 import_dict_objects[backend] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend objects = [] while ( line_index < len(lines) and find_backend(lines[line_index]) is None and not lines[line_index].startswith("else") ): line = lines[line_index] single_line_import_search = _re_import.search(line) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", ")) elif line.startswith(" " * 8): objects.append(line[8:-2]) line_index += 1 type_hint_objects = {"none": objects} # Let's continue with backend-specific objects while line_index < len(lines): # If the line is an if is_backend_available, we grab all objects associated. backend = find_backend(lines[line_index]) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1]) is None: backend = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index]) is None: line_index += 1 line_index += 1 objects = [] # Until we unindent, add backend objects to the list while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8): line = lines[line_index] single_line_import_search = _re_import.search(line) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", ")) elif line.startswith(" " * 12): objects.append(line[12:-2]) line_index += 1 type_hint_objects[backend] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def analyze_results(import_dict_objects: Dict[str, List[str]], type_hint_objects: Dict[str, List[str]]) -> List[str]: """ Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init. Args: import_dict_objects (`Dict[str, List[str]]`): A dictionary mapping backend names (`"none"` for the objects independent of any specific backend) to list of imported objects. type_hint_objects (`Dict[str, List[str]]`): A dictionary mapping backend names (`"none"` for the objects independent of any specific backend) to list of imported objects. Returns: `List[str]`: The list of errors corresponding to mismatches. 
""" def find_duplicates(seq): return [k for k, v in collections.Counter(seq).items() if v > 1] # If one backend is missing from the other part of the init, error early. if list(import_dict_objects.keys()) != list(type_hint_objects.keys()): return ["Both sides of the init do not have the same backends!"] errors = [] # Find all errors. for key in import_dict_objects.keys(): # Duplicate imports in any half. duplicate_imports = find_duplicates(import_dict_objects[key]) if duplicate_imports: errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}") duplicate_type_hints = find_duplicates(type_hint_objects[key]) if duplicate_type_hints: errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}") # Missing imports in either part of the init. if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])): name = "base imports" if key == "none" else f"{key} backend" errors.append(f"Differences for {name}:") for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f" {a} in TYPE_HINT but not in _import_structure.") for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f" {a} in _import_structure but not in TYPE_HINT.") return errors def check_all_inits(): """ Check all inits in the transformers repo and raise an error if at least one does not define the same objects in both halves. """ failures = [] for root, _, files in os.walk(PATH_TO_TRANSFORMERS): if "__init__.py" in files: fname = os.path.join(root, "__init__.py") objects = parse_init(fname) if objects is not None: errors = analyze_results(*objects) if len(errors) > 0: errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}" failures.append("\n".join(errors)) if len(failures) > 0: raise ValueError("\n\n".join(failures)) def get_transformers_submodules() -> List[str]: """ Returns the list of Transformers submodules. """ submodules = [] for path, directories, files in os.walk(PATH_TO_TRANSFORMERS): for folder in directories: # Ignore private modules if folder.startswith("_"): directories.remove(folder) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(path) / folder).glob("*.py"))) == 0: continue short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS)) submodule = short_path.replace(os.path.sep, ".") submodules.append(submodule) for fname in files: if fname == "__init__.py": continue short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS)) submodule = short_path.replace(".py", "").replace(os.path.sep, ".") if len(submodule.split(".")) == 1: submodules.append(submodule) return submodules IGNORE_SUBMODULES = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", "modeling_attn_mask_utils", "safetensors_conversion", ] def check_submodules(): """ Check all submodules of Transformers are properly registered in the main init. Error otherwise. """ # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import transformers = direct_transformers_import(PATH_TO_TRANSFORMERS) import_structure_keys = set(transformers._import_structure.keys()) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. 
with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
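
# Example (illustrative): both checks are usually run from the root of the repository, e.g.
#
#   python utils/check_inits.py
#
# A raised ValueError means that an init or a submodule registration needs to be fixed.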
transformers/utils/check_inits.py/0
{ "file_path": "transformers/utils/check_inits.py", "repo_id": "transformers", "token_count": 6546 }
421
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib
import os
import sys


# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")


r"""
The argument `test_file` in this file refers to a model test file. This should be a string of the form
`tests/models/*/test_modeling_*.py`.
"""


def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )

    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all [test] classes in a model test file with attribute `all_model_classes` that are non-empty.

    These are usually the (model) test classes containing the (non-slow) tests to run and are subclasses of one of the
    classes `ModelTesterMixin`, `TFModelTesterMixin` or `FlaxModelTesterMixin`, as well as a subclass of
    `unittest.TestCase`. Exceptions include `RagTestMixin` (and its subclasses).
    """
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
model_classes = getattr(attr_value, "all_model_classes", []) if len(model_classes) > 0: test_classes.append(attr_value) # sort with class names return sorted(test_classes, key=lambda x: x.__name__) def get_model_classes(test_file): """Get all model classes that appear in `all_model_classes` attributes in a model test file.""" test_classes = get_test_classes(test_file) model_classes = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes) # sort with class names return sorted(model_classes, key=lambda x: x.__name__) def get_model_tester_from_test_class(test_class): """Get the model tester class of a model test class.""" test = test_class() if hasattr(test, "setUp"): test.setUp() model_tester = None if hasattr(test, "model_tester"): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. if test.model_tester is not None: model_tester = test.model_tester.__class__ return model_tester def get_test_classes_for_model(test_file, model_class): """Get all [test] classes in `test_file` that have `model_class` in their `all_model_classes`.""" test_classes = get_test_classes(test_file) target_test_classes = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(test_class) # sort with class names return sorted(target_test_classes, key=lambda x: x.__name__) def get_tester_classes_for_model(test_file, model_class): """Get all model tester classes in `test_file` that are associated to `model_class`.""" test_classes = get_test_classes_for_model(test_file, model_class) tester_classes = [] for test_class in test_classes: tester_class = get_model_tester_from_test_class(test_class) if tester_class is not None: tester_classes.append(tester_class) # sort with class names return sorted(tester_classes, key=lambda x: x.__name__) def get_test_to_tester_mapping(test_file): """Get a mapping from [test] classes to model tester classes in `test_file`. This uses `get_test_classes` which may return classes that are NOT subclasses of `unittest.TestCase`. """ test_classes = get_test_classes(test_file) test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes} return test_tester_mapping def get_model_to_test_mapping(test_file): """Get a mapping from model classes to test classes in `test_file`.""" model_classes = get_model_classes(test_file) model_test_mapping = { model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes } return model_test_mapping def get_model_to_tester_mapping(test_file): """Get a mapping from model classes to model tester classes in `test_file`.""" model_classes = get_model_classes(test_file) model_to_tester_mapping = { model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes } return model_to_tester_mapping def to_json(o): """Make the information succinct and easy to read. Avoid the full class representation like `<class 'transformers.models.bert.modeling_bert.BertForMaskedLM'>` when displaying the results. Instead, we use class name (`BertForMaskedLM`) for the readability. """ if isinstance(o, str): return o elif isinstance(o, type): return o.__name__ elif isinstance(o, (list, tuple)): return [to_json(x) for x in o] elif isinstance(o, dict): return {to_json(k): to_json(v) for k, v in o.items()} else: return o
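
# Example usage (a sketch; the test file path below is only illustrative):
#
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   print(to_json(get_model_to_test_mapping(test_file)))
#   print(to_json(get_model_to_tester_mapping(test_file)))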
transformers/utils/get_test_info.py/0
{ "file_path": "transformers/utils/get_test_info.py", "repo_id": "transformers", "token_count": 2577 }
422
from transformers import ProcessorMixin class CustomProcessor(ProcessorMixin): feature_extractor_class = "AutoFeatureExtractor" tokenizer_class = "AutoTokenizer"
transformers/utils/test_module/custom_processing.py/0
{ "file_path": "transformers/utils/test_module/custom_processing.py", "repo_id": "transformers", "token_count": 51 }
423
<div style="text-align: center"> <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl_banner_dark.png"> </div> # TRL - Transformer Reinforcement Learning > Full stack transformer language models with reinforcement learning. <p align="center"> <a href="https://github.com/huggingface/trl/blob/main/LICENSE"> <img alt="License" src="https://img.shields.io/github/license/huggingface/trl.svg?color=blue"> </a> <a href="https://huggingface.co/docs/trl/index"> <img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/trl/index.svg?down_color=red&down_message=offline&up_message=online"> </a> <a href="https://github.com/huggingface/trl/releases"> <img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/trl.svg"> </a> </p> ## What is it? <div style="text-align: center"> <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/TRL-readme.png"> </div> `trl` is a full stack library where we provide a set of tools to train transformer language models and stable diffusion models with Reinforcement Learning, from the Supervised Fine-tuning step (SFT), Reward Modeling step (RM) to the Proximal Policy Optimization (PPO) step. The library is built on top of the [`transformers`](https://github.com/huggingface/transformers) library by 🤗 Hugging Face. Therefore, pre-trained language models can be directly loaded via `transformers`. At this point, most of decoder architectures and encoder-decoder architectures are supported. Refer to the documentation or the `examples/` folder for example code snippets and how to run these tools. **Highlights:** - [`SFTTrainer`](https://huggingface.co/docs/trl/sft_trainer): A light and friendly wrapper around `transformers` Trainer to easily fine-tune language models or adapters on a custom dataset. - [`RewardTrainer`](https://huggingface.co/docs/trl/reward_trainer): A light wrapper around `transformers` Trainer to easily fine-tune language models for human preferences (Reward Modeling). - [`PPOTrainer`](https://huggingface.co/docs/trl/trainer#trl.PPOTrainer): A PPO trainer for language models that just needs (query, response, reward) triplets to optimise the language model. - [`AutoModelForCausalLMWithValueHead`](https://huggingface.co/docs/trl/models#trl.AutoModelForCausalLMWithValueHead) & [`AutoModelForSeq2SeqLMWithValueHead`](https://huggingface.co/docs/trl/models#trl.AutoModelForSeq2SeqLMWithValueHead): A transformer model with an additional scalar output for each token which can be used as a value function in reinforcement learning. - [Examples](https://github.com/huggingface/trl/tree/main/examples): Train GPT2 to generate positive movie reviews with a BERT sentiment classifier, full RLHF using adapters only, train GPT-j to be less toxic, [Stack-Llama example](https://huggingface.co/blog/stackllama), etc. ## How PPO works Fine-tuning a language model via PPO consists of roughly three steps: 1. **Rollout**: The language model generates a response or continuation based on query which could be the start of a sentence. 2. **Evaluation**: The query and response are evaluated with a function, model, human feedback or some combination of them. The important thing is that this process should yield a scalar value for each query/response pair. 3. **Optimization**: This is the most complex part. In the optimisation step the query/response pairs are used to calculate the log-probabilities of the tokens in the sequences. 
This is done with the model that is trained and a reference model, which is usually the pre-trained model before fine-tuning. The KL-divergence between the two outputs is used as an additional reward signal to make sure the generated responses don't deviate too far from the reference language model. The active language model is then trained with PPO. This process is illustrated in the sketch below: <div style="text-align: center"> <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl_overview.png" width="800"> <p style="text-align: center;"> <b>Figure:</b> Sketch of the workflow. </p> </div> ## Installation ### Python package Install the library with pip: ```bash pip install trl ``` ### From source If you want to run the examples in the repository a few additional libraries are required. Clone the repository and install it with pip: ```bash git clone https://github.com/huggingface/trl.git cd trl/ pip install . ``` If you wish to develop TRL, you should install in editable mode: ```bash pip install -e . ``` ## How to use ### `SFTTrainer` This is a basic example on how to use the `SFTTrainer` from the library. The `SFTTrainer` is a light wrapper around the `transformers` Trainer to easily fine-tune language models or adapters on a custom dataset. ```python # imports from datasets import load_dataset from trl import SFTTrainer # get dataset dataset = load_dataset("imdb", split="train") # get trainer trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, dataset_text_field="text", max_seq_length=512, ) # train trainer.train() ``` ### `RewardTrainer` This is a basic example on how to use the `RewardTrainer` from the library. The `RewardTrainer` is a wrapper around the `transformers` Trainer to easily fine-tune reward models or adapters on a custom preference dataset. ```python # imports from transformers import AutoModelForSequenceClassification, AutoTokenizer from trl import RewardTrainer # load model and dataset - dataset needs to be in a specific format model = AutoModelForSequenceClassification.from_pretrained("gpt2", num_labels=1) tokenizer = AutoTokenizer.from_pretrained("gpt2") ... # load trainer trainer = RewardTrainer( model=model, tokenizer=tokenizer, train_dataset=dataset, ) # train trainer.train() ``` ### `PPOTrainer` This is a basic example on how to use the `PPOTrainer` from the library. Based on a query the language model creates a response which is then evaluated. The evaluation could be a human in the loop or another model's output. 
```python # imports import torch from transformers import AutoTokenizer from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead, create_reference_model from trl.core import respond_to_batch # get models model = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2') model_ref = create_reference_model(model) tokenizer = AutoTokenizer.from_pretrained('gpt2') # initialize trainer ppo_config = PPOConfig( batch_size=1, ) # encode a query query_txt = "This morning I went to the " query_tensor = tokenizer.encode(query_txt, return_tensors="pt") # get model response response_tensor = respond_to_batch(model, query_tensor) # create a ppo trainer ppo_trainer = PPOTrainer(ppo_config, model, model_ref, tokenizer) # define a reward for response # (this could be any reward such as human feedback or output from another model) reward = [torch.tensor(1.0)] # train model for one step with ppo train_stats = ppo_trainer.step([query_tensor[0]], [response_tensor[0]], reward) ``` ## References ### Proximal Policy Optimisation The PPO implementation largely follows the structure introduced in the paper **"Fine-Tuning Language Models from Human Preferences"** by D. Ziegler et al. \[[paper](https://arxiv.org/pdf/1909.08593.pdf), [code](https://github.com/openai/lm-human-preferences)]. ### Language models The language models utilize the `transformers` library by 🤗 Hugging Face. ## Citation ```bibtex @misc{vonwerra2022trl, author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang}, title = {TRL: Transformer Reinforcement Learning}, year = {2020}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\url{https://github.com/huggingface/trl}} } ```
trl/README.md/0
{ "file_path": "trl/README.md", "repo_id": "trl", "token_count": 2437 }
424
# Builds GPU docker image of PyTorch # Uses multi-staged approach to reduce size # Stage 1 # Use base conda image to reduce time FROM continuumio/miniconda3:latest AS compile-image # Specify py version ENV PYTHON_VERSION=3.10 # Install apt libs - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile RUN apt-get update && \ apt-get install -y curl git wget software-properties-common git-lfs && \ apt-get clean && \ rm -rf /var/lib/apt/lists* # Install audio-related libraries RUN apt-get update && \ apt install -y ffmpeg RUN apt install -y libsndfile1-dev RUN git lfs install # Create our conda env - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile RUN conda create --name trl python=${PYTHON_VERSION} ipython jupyter pip RUN python3 -m pip install --no-cache-dir --upgrade pip # Below is copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile # We don't install pytorch here yet since CUDA isn't available # instead we use the direct torch wheel ENV PATH /opt/conda/envs/trl/bin:$PATH # Activate our bash shell RUN chsh -s /bin/bash SHELL ["/bin/bash", "-c"] # Stage 2 FROM nvidia/cuda:12.2.2-devel-ubuntu22.04 AS build-image COPY --from=compile-image /opt/conda /opt/conda ENV PATH /opt/conda/bin:$PATH RUN chsh -s /bin/bash SHELL ["/bin/bash", "-c"] RUN source activate trl && \ python3 -m pip install --no-cache-dir bitsandbytes optimum auto-gptq # Install apt libs RUN apt-get update && \ apt-get install -y curl git wget && \ apt-get clean && \ rm -rf /var/lib/apt/lists* # Activate the conda env and install transformers + accelerate from source RUN source activate trl && \ python3 -m pip install -U --no-cache-dir \ librosa \ "soundfile>=0.12.1" \ scipy \ transformers \ accelerate \ peft \ trl[test]@git+https://github.com/huggingface/trl RUN source activate trl && \ pip freeze | grep trl RUN echo "source activate trl" >> ~/.profile # Activate the virtualenv CMD ["/bin/bash"]
trl/docker/trl-latest-gpu/Dockerfile/0
{ "file_path": "trl/docker/trl-latest-gpu/Dockerfile", "repo_id": "trl", "token_count": 774 }
425
# Models

With the `AutoModelForCausalLMWithValueHead` class, TRL supports all decoder model architectures in transformers, such as GPT-2, OPT, and GPT-Neo. In addition, with `AutoModelForSeq2SeqLMWithValueHead` you can use encoder-decoder architectures such as T5. TRL also requires reference models, which are frozen copies of the model being trained. With `create_reference_model` you can easily create a frozen copy and also share layers between the two models to save memory.

## PreTrainedModelWrapper

[[autodoc]] PreTrainedModelWrapper

## AutoModelForCausalLMWithValueHead

[[autodoc]] AutoModelForCausalLMWithValueHead
    - __init__
    - forward
    - generate
    - _init_weights

## AutoModelForSeq2SeqLMWithValueHead

[[autodoc]] AutoModelForSeq2SeqLMWithValueHead
    - __init__
    - forward
    - generate
    - _init_weights

## create_reference_model

[[autodoc]] create_reference_model
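
A minimal usage sketch for `create_reference_model` (the `num_shared_layers` argument is assumed here; see the API reference above for the exact signature):

```python
from trl import AutoModelForCausalLMWithValueHead, create_reference_model

model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")

# Fully frozen copy of the model, e.g. for the KL penalty in PPO
ref_model = create_reference_model(model)

# Optionally share the first 6 layers between the two models to save memory
ref_model_shared = create_reference_model(model, num_shared_layers=6)
```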
trl/docs/source/models.mdx/0
{ "file_path": "trl/docs/source/models.mdx", "repo_id": "trl", "token_count": 283 }
426
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import numpy as np import torch from transformers import AutoTokenizer, load_tool from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, TextEnvironment def generate_data(n): """Generate random arithmetic tasks and answers.""" tasks, answers = [], [] for _ in range(n): a = np.random.randint(0, 50) b = np.random.randint(0, 50) op = np.random.choice(["-", "+", "*"]) tasks.append(f"\n\nWhat is {a} {op} {b}?") if op == "-": answers.append(a - b) elif op == "+": answers.append(a + b) else: answers.append(a * b) return tasks, answers def exact_match_reward(responses, answers=None): """Reward if generated response contains correct answer.""" rewards = [] pattern = r"Result\s*=\s*(-?\d+(?:\.\d+)?)\s*<submit>" # generated by chatGPT for response, answer in zip(responses, answers): reward = 0.0 predicted_number = None match_pattern = re.findall(pattern, response) if match_pattern: predicted_number = float(match_pattern[0]) if predicted_number is not None: if np.abs(predicted_number - answer) < 0.01: reward += 1.0 rewards.append(torch.tensor(reward)) return rewards # set up models model_id = "gpt2" model = AutoModelForCausalLMWithValueHead.from_pretrained(model_id) model_ref = AutoModelForCausalLMWithValueHead.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token # system prompt prompt = """\ What is 13-3? <request><SimpleCalculatorTool>13-3<call>10.0<response> Result=10<submit> What is 4*3? <request><SimpleCalculatorTool>4*3<call>12.0<response> Result=12<submit>""" generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, "eos_token_id": -1, "max_new_tokens": 32, } # trainer ppo_config = PPOConfig( batch_size=256, learning_rate=1.41e-5, mini_batch_size=64, log_with="wandb", ) ppo_trainer = PPOTrainer(ppo_config, model, model_ref, tokenizer) # text env text_env = TextEnvironment( model, tokenizer, {"SimpleCalculatorTool": load_tool("ybelkada/simple-calculator")}, exact_match_reward, prompt, generation_kwargs=generation_kwargs, ) # main training loop for step in range(100): tasks, answers = generate_data(ppo_config.batch_size) queries, responses, masks, rewards, histories = text_env.run(tasks, answers=answers) train_stats = ppo_trainer.step(queries, responses, rewards, masks) response_texts = [tokenizer.decode(response) for response in responses] query_texts = [tokenizer.decode(query) for query in queries] texts = {"query": [qt.split("<submit>")[-1].strip() for qt in query_texts], "response": response_texts} ppo_trainer.log_stats(train_stats, texts, rewards, columns_to_log=["query", "response", "answer"]) ppo_trainer.save_pretrained(model_id + "-calculator")
trl/examples/research_projects/tools/calculator.py/0
{ "file_path": "trl/examples/research_projects/tools/calculator.py", "repo_id": "trl", "token_count": 1439 }
427
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from datasets import Dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, TrainingArguments from trl import IterativeSFTTrainer class IterativeTrainerTester(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" cls.model = AutoModelForCausalLM.from_pretrained(cls.model_id) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_id) cls.tokenizer.pad_token = cls.tokenizer.eos_token # get t5 as seq2seq example: model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration-correct-vocab" cls.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) cls.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) def _init_tensor_dummy_dataset(self): dummy_dataset_dict = { "input_ids": [torch.tensor([5303, 3621]), torch.tensor([3666, 1438, 318]), torch.tensor([5303, 3621])], "attention_mask": [torch.tensor([1, 1]), torch.tensor([1, 1, 1]), torch.tensor([1, 1])], "labels": [torch.tensor([5303, 3621]), torch.tensor([3666, 1438, 318]), torch.tensor([5303, 3621])], } dummy_dataset = Dataset.from_dict(dummy_dataset_dict) dummy_dataset.set_format("torch") return dummy_dataset def _init_textual_dummy_dataset(self): dummy_dataset_dict = { "texts": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"], "texts_labels": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"], } dummy_dataset = Dataset.from_dict(dummy_dataset_dict) dummy_dataset.set_format("torch") return dummy_dataset def setUp(self): # initialize trainer self.model.train() return super().setUp() @parameterized.expand( [ ["gpt2", "tensor"], ["gpt2", "text"], ["t5", "tensor"], ["t5", "text"], ] ) def test_iterative_step_from_tensor(self, model_name, input_name): with tempfile.TemporaryDirectory() as tmp_dir: # initialize dataset if input_name == "tensor": dummy_dataset = self._init_tensor_dummy_dataset() inputs = { "input_ids": dummy_dataset["input_ids"], "attention_mask": dummy_dataset["attention_mask"], "labels": dummy_dataset["labels"], } else: dummy_dataset = self._init_textual_dummy_dataset() inputs = { "texts": dummy_dataset["texts"], "texts_labels": dummy_dataset["texts_labels"], } if model_name == "gpt2": model = self.model tokenizer = self.tokenizer else: model = self.t5_model tokenizer = self.t5_tokenizer args = TrainingArguments( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=2, ) iterative_trainer = IterativeSFTTrainer(model=model, args=args, tokenizer=tokenizer) iterative_trainer.step(**inputs) for param in iterative_trainer.model.parameters(): assert param.grad is not None
trl/tests/test_iterative_sft_trainer.py/0
{ "file_path": "trl/tests/test_iterative_sft_trainer.py", "repo_id": "trl", "token_count": 1898 }
428
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import sys if sys.version_info < (3, 8): _is_python_greater_3_8 = False else: _is_python_greater_3_8 = True def is_peft_available() -> bool: return importlib.util.find_spec("peft") is not None def is_unsloth_available() -> bool: return importlib.util.find_spec("unsloth") is not None def is_accelerate_greater_20_0() -> bool: if _is_python_greater_3_8: from importlib.metadata import version accelerate_version = version("accelerate") else: import pkg_resources accelerate_version = pkg_resources.get_distribution("accelerate").version return accelerate_version >= "0.20.0" def is_transformers_greater_than(version: str) -> bool: _transformers_version = importlib.metadata.version("transformers") return _transformers_version > version def is_torch_greater_2_0() -> bool: if _is_python_greater_3_8: from importlib.metadata import version torch_version = version("torch") else: import pkg_resources torch_version = pkg_resources.get_distribution("torch").version return torch_version >= "2.0" def is_diffusers_available() -> bool: return importlib.util.find_spec("diffusers") is not None def is_bitsandbytes_available() -> bool: import torch # bnb can be imported without GPU but is not usable. return importlib.util.find_spec("bitsandbytes") is not None and torch.cuda.is_available() def is_torchvision_available() -> bool: return importlib.util.find_spec("torchvision") is not None def is_rich_available() -> bool: return importlib.util.find_spec("rich") is not None def is_wandb_available() -> bool: return importlib.util.find_spec("wandb") is not None def is_xpu_available() -> bool: if is_accelerate_greater_20_0(): import accelerate return accelerate.utils.is_xpu_available() else: if importlib.util.find_spec("intel_extension_for_pytorch") is None: return False try: import torch return hasattr(torch, "xpu") and torch.xpu.is_available() except RuntimeError: return False def is_npu_available() -> bool: """Checks if `torch_npu` is installed and potentially if a NPU is in the environment""" if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None: return False import torch import torch_npu # noqa: F401 return hasattr(torch, "npu") and torch.npu.is_available()
trl/trl/import_utils.py/0
{ "file_path": "trl/trl/import_utils.py", "repo_id": "trl", "token_count": 1125 }
429
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import warnings from dataclasses import FrozenInstanceError, replace from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from datasets import Dataset from transformers import DataCollator, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainingArguments from transformers.trainer_callback import TrainerCallback from transformers.trainer_pt_utils import nested_detach from transformers.trainer_utils import EvalPrediction from ..import_utils import is_peft_available from .reward_config import RewardConfig from .utils import RewardDataCollatorWithPadding, compute_accuracy if is_peft_available(): from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training class RewardTrainer(Trainer): r""" The RewardTrainer can be used to train your custom Reward Model. It is a subclass of the `transformers.Trainer` class and inherits all of its attributes and methods. It is recommended to use an `AutoModelForSequenceClassification` as the reward model. The reward model should be trained on a dataset of paired examples, where each example is a tuple of two sequences. The reward model should be trained to predict which example in the pair is more relevant to the task at hand. The reward trainer expects a very specific format for the dataset. The dataset should contain two 4 entries at least if you don't use the default `RewardDataCollatorWithPadding` data collator. The entries should be named - `input_ids_chosen` - `attention_mask_chosen` - `input_ids_rejected` - `attention_mask_rejected` Optionally, you can also pass a `margin` entry to the dataset. This entry should contain the margin used to modulate the loss of the reward model as outlined in https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/. If you don't pass a margin, no margin will be used. """ def __init__( self, model: Optional[Union[PreTrainedModel, nn.Module]] = None, args: Optional[RewardConfig] = None, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, tokenizer: Optional[PreTrainedTokenizerBase] = None, model_init: Optional[Callable[[], PreTrainedModel]] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, callbacks: Optional[List[TrainerCallback]] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = ( None, None, ), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, max_length: Optional[int] = None, peft_config: Optional[Dict] = None, ): """ Initialize RewardTrainer. Args: model (`transformers.PreTrainedModel`): The model to train, preferably an `AutoModelForSequenceClassification`. args (`RewardConfig`): The arguments to use for training. data_collator (`transformers.DataCollator`): The data collator to use for training. 
If None is specified, the default data collator (`RewardDataCollatorWithPadding`) will be used which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences. train_dataset (`datasets.Dataset`): The dataset to use for training. eval_dataset (`datasets.Dataset`): The dataset to use for evaluation. tokenizer (`transformers.PreTrainedTokenizerBase`): The tokenizer to use for training. This argument is required if you want to use the default data collator. model_init (`Callable[[], transformers.PreTrainedModel]`): The model initializer to use for training. If None is specified, the default model initializer will be used. compute_metrics (`Callable[[transformers.EvalPrediction], Dict]`, *optional* defaults to `compute_accuracy`): The metrics to use for evaluation. If no metrics are specified, the default metric (`compute_accuracy`) will be used. callbacks (`List[transformers.TrainerCallback]`): The callbacks to use for training. optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): The optimizer and scheduler to use for training. preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): The function to use to preprocess the logits before computing the metrics. max_length (`int`, defaults to `None`): The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator. peft_config (`Dict`, defaults to `None`): The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model. """ if type(args) == TrainingArguments: warnings.warn( "Using `transformers.TrainingArguments` for `args` is deprecated and will be removed in a future version. Please use `RewardConfig` instead.", FutureWarning, ) if max_length is not None: warnings.warn( "The `max_length` argument is deprecated and will be removed in a future version. Please use the `RewardConfig` to set `max_length` instead.", FutureWarning, ) else: if max_length is not None and args.max_length is not None: raise ValueError( "You cannot specify both `max_length` and `args.max_length`. Please use the `RewardConfig` to set `max_length` once." ) if max_length is not None and args.max_length is None: warnings.warn( "The `max_length` argument is deprecated and will be removed in a future version. Please use the `RewardConfig` to set `max_length` instead.", FutureWarning, ) if not is_peft_available() and peft_config is not None: raise ValueError( "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models" ) elif is_peft_available() and peft_config is not None: if not isinstance(model, PeftModel): if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_quantized", False): _supports_gc_kwargs = "gradient_checkpointing_kwargs" in list( inspect.signature(prepare_model_for_kbit_training).parameters ) preprare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing} if not _supports_gc_kwargs and args.gradient_checkpointing_kwargs is not None: warnings.warn( "You passed `gradient_checkpointing_kwargs` in the trainer's kwargs, but your peft version does not support it. " "please update to the latest version of peft to use `gradient_checkpointing_kwargs`." 
) elif _supports_gc_kwargs and args.gradient_checkpointing_kwargs is not None: preprare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **preprare_model_kwargs) model = get_peft_model(model, peft_config) if compute_metrics is None: compute_metrics = compute_accuracy if data_collator is None: if tokenizer is None: raise ValueError( "max_length or a tokenizer must be specified when using the default RewardDataCollatorWithPadding" ) if type(args) == TrainingArguments: if max_length is None: warnings.warn( "When using RewardDataCollatorWithPadding, you should set `max_length` in RewardConfig." " It will be set to `512` by default, but you should do it yourself in the future.", UserWarning, ) max_length = 512 else: if max_length is None and args.max_length is None: warnings.warn( "When using RewardDataCollatorWithPadding, you should set `max_length` in RewardConfig." " It will be set to `512` by default, but you should do it yourself in the future.", UserWarning, ) max_length = 512 if max_length is None and args.max_length is not None: max_length = args.max_length data_collator = RewardDataCollatorWithPadding(tokenizer, max_length=max_length) if args.remove_unused_columns: try: # for bc before https://github.com/huggingface/transformers/pull/25435 args.remove_unused_columns = False except FrozenInstanceError: args = replace(args, remove_unused_columns=False) # warn users warnings.warn( "When using RewardDataCollatorWithPadding, you should set `remove_unused_columns=False` in your RewardConfig" " we have set it for you, but you should do it yourself in the future.", UserWarning, ) self.use_reward_data_collator = True else: self.use_reward_data_collator = False super().__init__( model, args, data_collator, train_dataset, eval_dataset, tokenizer, model_init, compute_metrics, callbacks, optimizers, preprocess_logits_for_metrics, ) def compute_loss( self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], return_outputs=False, ) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]: if not self.use_reward_data_collator: warnings.warn( "The current compute_loss is implemented for RewardDataCollatorWithPadding," " if you are using a custom data collator make sure you know what you are doing or" " implement your own compute_loss method." 
) rewards_chosen = model( input_ids=inputs["input_ids_chosen"], attention_mask=inputs["attention_mask_chosen"], return_dict=True, )["logits"] rewards_rejected = model( input_ids=inputs["input_ids_rejected"], attention_mask=inputs["attention_mask_rejected"], return_dict=True, )["logits"] # calculate loss, optionally modulate with margin if "margin" in inputs: loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected - inputs["margin"]).mean() else: loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected).mean() if return_outputs: return loss, { "rewards_chosen": rewards_chosen, "rewards_rejected": rewards_rejected, } return loss def prediction_step( self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: inputs = self._prepare_inputs(inputs) if ignore_keys is None: if hasattr(self.model, "config"): ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) else: ignore_keys = [] with torch.no_grad(): loss, logits_dict = self.compute_loss(model, inputs, return_outputs=True) if prediction_loss_only: return (loss, None, None) loss = loss.detach() logits = tuple(v for k, v in logits_dict.items() if k not in ignore_keys) logits = nested_detach(logits) # Stack accepted against rejected, mean over logits # and softmax to get preferences between accepted and rejected to sum to 1 logits = torch.stack(logits).mean(dim=2).softmax(dim=0).T labels = torch.zeros(logits.shape[0]) labels = self._prepare_inputs(labels) return loss, logits, labels
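

# A minimal sketch (illustrative) of the paired-preference format described in the class docstring and
# expected by the default `RewardDataCollatorWithPadding` collator; the token ids below are arbitrary:
#
#   from datasets import Dataset
#
#   train_dataset = Dataset.from_dict(
#       {
#           "input_ids_chosen": [[1, 2, 3]],
#           "attention_mask_chosen": [[1, 1, 1]],
#           "input_ids_rejected": [[4, 5]],
#           "attention_mask_rejected": [[1, 1]],
#       }
#   )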
trl/trl/trainer/reward_trainer.py/0
{ "file_path": "trl/trl/trainer/reward_trainer.py", "repo_id": "trl", "token_count": 5849 }
430